aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-04-02 23:53:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-02 23:53:45 -0400
commitcd6362befe4cc7bf589a5236d2a780af2d47bcc9 (patch)
tree3bd4e13ec3f92a00dc4f6c3d65e820b54dbfe46e /drivers/net
parent0f1b1e6d73cb989ce2c071edc57deade3b084dfe (diff)
parentb1586f099ba897542ece36e8a23c1a62907261ef (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: "Here is my initial pull request for the networking subsystem during this merge window: 1) Support for ESN in AH (RFC 4302) from Fan Du. 2) Add full kernel doc for ethtool command structures, from Ben Hutchings. 3) Add BCM7xxx PHY driver, from Florian Fainelli. 4) Export computed TCP rate information in netlink socket dumps, from Eric Dumazet. 5) Allow IPSEC SA to be dumped partially using a filter, from Nicolas Dichtel. 6) Convert many drivers to pci_enable_msix_range(), from Alexander Gordeev. 7) Record SKB timestamps more efficiently, from Eric Dumazet. 8) Switch to microsecond resolution for TCP round trip times, also from Eric Dumazet. 9) Clean up and fix 6lowpan fragmentation handling by making use of the existing inet_frag api for its implementation. 10) Add TX grant mapping to xen-netback driver, from Zoltan Kiss. 11) Auto size SKB lengths when composing netlink messages based upon past message sizes used, from Eric Dumazet. 12) qdisc dumps can take a long time, add a cond_resched(), From Eric Dumazet. 13) Sanitize netpoll core and drivers wrt. SKB handling semantics. Get rid of never-used-in-tree netpoll RX handling. From Eric W Biederman. 14) Support inter-address-family and namespace changing in VTI tunnel driver(s). From Steffen Klassert. 15) Add Altera TSE driver, from Vince Bridgers. 16) Optimizing csum_replace2() so that it doesn't adjust the checksum by checksumming the entire header, from Eric Dumazet. 17) Expand BPF internal implementation for faster interpreting, more direct translations into JIT'd code, and much cleaner uses of BPF filtering in non-socket contexts. From Daniel Borkmann and Alexei Starovoitov" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1976 commits) netpoll: Use skb_irq_freeable to make zap_completion_queue safe. 
net: Add a test to see if a skb is freeable in irq context qlcnic: Fix build failure due to undefined reference to `vxlan_get_rx_port' net: ptp: move PTP classifier in its own file net: sxgbe: make "core_ops" static net: sxgbe: fix logical vs bitwise operation net: sxgbe: sxgbe_mdio_register() frees the bus Call efx_set_channels() before efx->type->dimension_resources() xen-netback: disable rogue vif in kthread context net/mlx4: Set proper build dependancy with vxlan be2net: fix build dependency on VxLAN mac802154: make csma/cca parameters per-wpan mac802154: allow only one WPAN to be up at any given time net: filter: minor: fix kdoc in __sk_run_filter netlink: don't compare the nul-termination in nla_strcmp can: c_can: Avoid led toggling for every packet. can: c_can: Simplify TX interrupt cleanup can: c_can: Store dlc private can: c_can: Reduce register access can: c_can: Make the code readable ...
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/bonding/bond_3ad.c74
-rw-r--r--drivers/net/bonding/bond_3ad.h175
-rw-r--r--drivers/net/bonding/bond_alb.c138
-rw-r--r--drivers/net/bonding/bond_debugfs.c10
-rw-r--r--drivers/net/bonding/bond_main.c466
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c328
-rw-r--r--drivers/net/bonding/bond_options.h62
-rw-r--r--drivers/net/bonding/bond_procfs.c14
-rw-r--r--drivers/net/bonding/bond_sysfs.c24
-rw-r--r--drivers/net/bonding/bonding.h43
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_spi.c1
-rw-r--r--drivers/net/can/at91_can.c10
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c349
-rw-r--r--drivers/net/can/c_can/c_can.h29
-rw-r--r--drivers/net/can/c_can/c_can_platform.c47
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/dev.c183
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/grcan.c1
-rw-r--r--drivers/net/can/janz-ican3.c65
-rw-r--r--drivers/net/can/mcp251x.c22
-rw-r--r--drivers/net/can/mscan/mscan.c7
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig13
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000.c10
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c220
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c194
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c3
-rw-r--r--drivers/net/dummy.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1127
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/lib8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c9
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/altera/Kconfig8
-rw-r--r--drivers/net/ethernet/altera/Makefile7
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c202
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.h34
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h167
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c509
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.h35
-rw-r--r--drivers/net/ethernet/altera/altera_sgdmahw.h124
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h486
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c235
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1543
-rw-r--r--drivers/net/ethernet/altera/altera_utils.c44
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h27
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c3
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c124
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c34
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c14
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c15
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c162
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h31
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c385
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c155
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1876
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h368
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c619
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h22
-rw-r--r--drivers/net/ethernet/broadcom/genet/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2584
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h628
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c464
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c30
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c65
-rw-r--r--drivers/net/ethernet/cadence/macb.c8
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c230
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c172
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c152
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c6
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c24
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/dnet.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig8
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h27
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c213
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h97
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c30
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c467
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1327
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h114
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c165
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c340
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile7
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h55
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h52
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h55
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c427
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c373
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c370
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c476
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c546
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c117
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c434
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c135
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c369
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c90
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h16
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h48
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c299
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h75
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c25
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c76
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h36
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c170
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c64
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c360
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c200
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c212
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c141
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c161
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h33
-rw-r--r--drivers/net/ethernet/jme.c16
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c20
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c267
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c17
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c40
-rw-r--r--drivers/net/ethernet/neterion/s2io.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c35
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c57
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c11
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h121
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c91
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c23
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c179
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c85
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c233
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c102
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c25
-rw-r--r--drivers/net/ethernet/rdc/r6040.c6
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169.c14
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c271
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/samsung/Kconfig16
-rw-r--r--drivers/net/ethernet/samsung/Makefile5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Kconfig9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Makefile4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h535
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c262
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c515
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h298
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c382
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h50
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c524
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2317
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c244
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c254
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h104
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c259
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h488
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c91
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h38
-rw-r--r--drivers/net/ethernet/sfc/ef10.c27
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h61
-rw-r--r--drivers/net/ethernet/sfc/efx.c33
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c39
-rw-r--r--drivers/net/ethernet/sfc/falcon.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c5
-rw-r--r--drivers/net/ethernet/sfc/filter.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h1
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c93
-rw-r--r--drivers/net/ethernet/sfc/selftest.c6
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c13
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/silan/sc92031.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c130
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c1
-rw-r--r--drivers/net/ethernet/sun/niu.c11
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c34
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/tile/tilegx.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c11
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c16
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c9
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c15
-rw-r--r--drivers/net/ethernet/xscale/Kconfig1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c11
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h207
-rw-r--r--drivers/net/hyperv/netvsc.c93
-rw-r--r--drivers/net/hyperv/netvsc_drv.c337
-rw-r--r--drivers/net/hyperv/rndis_filter.c187
-rw-r--r--drivers/net/ieee802154/Kconfig34
-rw-r--r--drivers/net/ieee802154/at86rf230.c520
-rw-r--r--drivers/net/ieee802154/fakehard.c22
-rw-r--r--drivers/net/ieee802154/mrf24j40.c17
-rw-r--r--drivers/net/ifb.c8
-rw-r--r--drivers/net/loopback.c16
-rw-r--r--drivers/net/macvlan.c13
-rw-r--r--drivers/net/nlmon.c18
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c30
-rw-r--r--drivers/net/phy/bcm7xxx.c359
-rw-r--r--drivers/net/phy/broadcom.c52
-rw-r--r--drivers/net/phy/dp83640.c94
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio_bus.c20
-rw-r--r--drivers/net/phy/micrel.c49
-rw-r--r--drivers/net/phy/phy.c51
-rw-r--r--drivers/net/phy/phy_device.c57
-rw-r--r--drivers/net/ppp/ppp_generic.c60
-rw-r--r--drivers/net/team/team.c28
-rw-r--r--drivers/net/team/team_mode_loadbalance.c4
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c17
-rw-r--r--drivers/net/usb/lg-vl600.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c1124
-rw-r--r--drivers/net/veth.c23
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c112
-rw-r--r--drivers/net/vxlan.c13
-rw-r--r--drivers/net/wimax/i2400m/netdev.c3
-rw-r--r--drivers/net/wireless/Kconfig11
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/airo.c17
-rw-r--r--drivers/net/wireless/ath/ath.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h81
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c269
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c205
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c850
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c536
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c132
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h34
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c235
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c203
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c180
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.h26
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c260
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c240
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c179
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c247
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c1495
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h248
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c267
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c72
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c64
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c241
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c181
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c234
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c37
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c177
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c334
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h164
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c136
-rw-r--r--drivers/net/wireless/atmel.c8
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/debugfs.h2
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/main.h35
-rw-r--r--drivers/net/wireless/b43/phy_common.c4
-rw-r--r--drivers/net/wireless/b43/pio.c10
-rw-r--r--drivers/net/wireless/b43/sysfs.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c14
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/sysfs.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c107
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c1034
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c982
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c972
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h231
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c283
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c20
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h3
-rw-r--r--drivers/net/wireless/cw1200/fwio.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c13
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c15
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c83
-rw-r--r--drivers/net/wireless/iwlegacy/common.h17
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c270
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h69
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c (renamed from drivers/net/wireless/iwlwifi/mvm/bt-coex.c)474
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c226
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c485
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h)21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h134
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h106
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c79
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c78
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c601
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h259
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/offloading.c215
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c481
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c426
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c100
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c210
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c255
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c213
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h62
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c43
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c138
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c85
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c52
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c410
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c73
-rw-r--r--drivers/net/wireless/libertas/cfg.c3
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c113
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h6
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c192
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h2
-rw-r--r--drivers/net/wireless/mwifiex/11h.c4
-rw-r--r--drivers/net/wireless/mwifiex/11n.c106
-rw-r--r--drivers/net/wireless/mwifiex/11n.h58
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c203
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h3
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/README2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c403
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c205
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c161
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c8
-rw-r--r--drivers/net/wireless/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/mwifiex/fw.h202
-rw-r--r--drivers/net/wireless/mwifiex/ie.c6
-rw-r--r--drivers/net/wireless/mwifiex/init.c8
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h25
-rw-r--r--drivers/net/wireless/mwifiex/join.c66
-rw-r--r--drivers/net/wireless/mwifiex/main.c10
-rw-r--r--drivers/net/wireless/mwifiex/main.h112
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c180
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h5
-rw-r--r--drivers/net/wireless/mwifiex/scan.c615
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c11
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c471
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c130
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c52
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c164
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c34
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c1044
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c24
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c130
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c22
-rw-r--r--drivers/net/wireless/mwifiex/usb.c11
-rw-r--r--drivers/net/wireless/mwifiex/util.c118
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c131
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/mwl8k.c197
-rw-r--r--drivers/net/wireless/orinoco/cfg.c5
-rw-r--r--drivers/net/wireless/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/orinoco/scan.c5
-rw-r--r--drivers/net/wireless/orinoco/wext.c2
-rw-r--r--drivers/net/wireless/p54/p54usb.c6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c7
-rw-r--r--drivers/net/wireless/rsi/Kconfig30
-rw-r--r--drivers/net/wireless/rsi/Makefile12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c342
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c339
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1008
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c295
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c1304
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_pkt.c196
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c850
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c566
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c575
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c177
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h126
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h87
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h48
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h218
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h285
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h129
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1009
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8180.h76
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c23
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c475
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h61
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c20
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h269
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig27
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/Makefile7
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h75
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c3698
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h173
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c1011
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h559
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c218
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h52
-rw-r--r--drivers/net/wireless/rtlwifi/core.c124
-rw-r--r--drivers/net/wireless/rtlwifi/core.h4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c131
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h14
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c119
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h60
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c128
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c63
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c31
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c50
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c429
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/reg.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c48
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c87
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/def.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c260
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c127
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c530
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/Makefile19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/def.h248
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.c1325
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.h310
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.c620
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.h248
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c2523
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.h64
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/led.c153
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/led.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.c2156
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.h217
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c106
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h304
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c140
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h95
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/reg.h2277
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.c504
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c384
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/table.c572
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/table.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c960
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.h617
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c65
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c329
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h126
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/main.c33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c434
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h89
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h518
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c37
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c31
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c71
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c67
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h53
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c85
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c200
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c45
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h27
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h86
-rw-r--r--drivers/net/wireless/wl3501_cs.c6
-rw-r--r--drivers/net/wireless/zd1201.c9
-rw-r--r--drivers/net/xen-netback/common.h112
-rw-r--r--drivers/net/xen-netback/interface.c141
-rw-r--r--drivers/net/xen-netback/netback.c813
-rw-r--r--drivers/net/xen-netfront.c14
817 files changed, 77015 insertions, 21834 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 494b888a6568..89402c3b64f8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -177,11 +177,6 @@ config NETCONSOLE_DYNAMIC
177config NETPOLL 177config NETPOLL
178 def_bool NETCONSOLE 178 def_bool NETCONSOLE
179 179
180config NETPOLL_TRAP
181 bool "Netpoll traffic trapping"
182 default n
183 depends on NETPOLL
184
185config NET_POLL_CONTROLLER 180config NET_POLL_CONTROLLER
186 def_bool NETPOLL 181 def_bool NETPOLL
187 182
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index dcde56057fe1..b667a51ed215 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -768,11 +768,11 @@ static int ad_lacpdu_send(struct port *port)
768 768
769 lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); 769 lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
770 770
771 memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); 771 ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
772 /* Note: source address is set to be the member's PERMANENT address, 772 /* Note: source address is set to be the member's PERMANENT address,
773 * because we use it to identify loopback lacpdus in receive. 773 * because we use it to identify loopback lacpdus in receive.
774 */ 774 */
775 memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); 775 ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
776 lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; 776 lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
777 777
778 lacpdu_header->lacpdu = port->lacpdu; 778 lacpdu_header->lacpdu = port->lacpdu;
@@ -810,11 +810,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
810 810
811 marker_header = (struct bond_marker_header *)skb_put(skb, length); 811 marker_header = (struct bond_marker_header *)skb_put(skb, length);
812 812
813 memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); 813 ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
814 /* Note: source address is set to be the member's PERMANENT address, 814 /* Note: source address is set to be the member's PERMANENT address,
815 * because we use it to identify loopback MARKERs in receive. 815 * because we use it to identify loopback MARKERs in receive.
816 */ 816 */
817 memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); 817 ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
818 marker_header->hdr.h_proto = PKT_TYPE_LACPDU; 818 marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
819 819
820 marker_header->marker = *marker; 820 marker_header->marker = *marker;
@@ -1079,7 +1079,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1079 /* detect loopback situation */ 1079 /* detect loopback situation */
1080 if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system), 1080 if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
1081 &(port->actor_system))) { 1081 &(port->actor_system))) {
1082 pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", 1082 pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
1083 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
1083 port->slave->bond->dev->name, 1084 port->slave->bond->dev->name,
1084 port->slave->dev->name); 1085 port->slave->dev->name);
1085 return; 1086 return;
@@ -1283,11 +1284,11 @@ static void ad_port_selection_logic(struct port *port)
1283 /* meaning: the port was related to an aggregator 1284 /* meaning: the port was related to an aggregator
1284 * but was not on the aggregator port list 1285 * but was not on the aggregator port list
1285 */ 1286 */
1286 pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n", 1287 pr_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
1287 port->slave->bond->dev->name, 1288 port->slave->bond->dev->name,
1288 port->actor_port_number, 1289 port->actor_port_number,
1289 port->slave->dev->name, 1290 port->slave->dev->name,
1290 port->aggregator->aggregator_identifier); 1291 port->aggregator->aggregator_identifier);
1291 } 1292 }
1292 } 1293 }
1293 /* search on all aggregators for a suitable aggregator for this port */ 1294 /* search on all aggregators for a suitable aggregator for this port */
@@ -1444,9 +1445,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
1444 break; 1445 break;
1445 1446
1446 default: 1447 default:
1447 pr_warn("%s: Impossible agg select mode %d\n", 1448 pr_warn_ratelimited("%s: Impossible agg select mode %d\n",
1448 curr->slave->bond->dev->name, 1449 curr->slave->bond->dev->name,
1449 __get_agg_selection_mode(curr->lag_ports)); 1450 __get_agg_selection_mode(curr->lag_ports));
1450 break; 1451 break;
1451 } 1452 }
1452 1453
@@ -1559,9 +1560,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
1559 1560
1560 /* check if any partner replys */ 1561 /* check if any partner replys */
1561 if (best->is_individual) { 1562 if (best->is_individual) {
1562 pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", 1563 pr_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
1563 best->slave ? 1564 best->slave ?
1564 best->slave->bond->dev->name : "NULL"); 1565 best->slave->bond->dev->name : "NULL");
1565 } 1566 }
1566 1567
1567 best->is_active = 1; 1568 best->is_active = 1;
@@ -1948,7 +1949,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
1948 * new aggregator 1949 * new aggregator
1949 */ 1950 */
1950 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) { 1951 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
1951 pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n", 1952 pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
1952 aggregator->aggregator_identifier, 1953 aggregator->aggregator_identifier,
1953 new_aggregator->aggregator_identifier); 1954 new_aggregator->aggregator_identifier);
1954 1955
@@ -2080,8 +2081,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2080 /* select the active aggregator for the bond */ 2081 /* select the active aggregator for the bond */
2081 if (port) { 2082 if (port) {
2082 if (!port->slave) { 2083 if (!port->slave) {
2083 pr_warn("%s: Warning: bond's first port is uninitialized\n", 2084 pr_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
2084 bond->dev->name); 2085 bond->dev->name);
2085 goto re_arm; 2086 goto re_arm;
2086 } 2087 }
2087 2088
@@ -2095,8 +2096,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2095 bond_for_each_slave_rcu(bond, slave, iter) { 2096 bond_for_each_slave_rcu(bond, slave, iter) {
2096 port = &(SLAVE_AD_INFO(slave).port); 2097 port = &(SLAVE_AD_INFO(slave).port);
2097 if (!port->slave) { 2098 if (!port->slave) {
2098 pr_warn("%s: Warning: Found an uninitialized port\n", 2099 pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
2099 bond->dev->name); 2100 bond->dev->name);
2100 goto re_arm; 2101 goto re_arm;
2101 } 2102 }
2102 2103
@@ -2157,8 +2158,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
2157 port = &(SLAVE_AD_INFO(slave).port); 2158 port = &(SLAVE_AD_INFO(slave).port);
2158 2159
2159 if (!port->slave) { 2160 if (!port->slave) {
2160 pr_warn("%s: Warning: port of slave %s is uninitialized\n", 2161 pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
2161 slave->dev->name, slave->bond->dev->name); 2162 slave->dev->name, slave->bond->dev->name);
2162 return ret; 2163 return ret;
2163 } 2164 }
2164 2165
@@ -2310,9 +2311,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2310 port->actor_oper_port_key = (port->actor_admin_port_key &= 2311 port->actor_oper_port_key = (port->actor_admin_port_key &=
2311 ~AD_SPEED_KEY_BITS); 2312 ~AD_SPEED_KEY_BITS);
2312 } 2313 }
2313 pr_debug("Port %d changed link status to %s", 2314 pr_debug("Port %d changed link status to %s\n",
2314 port->actor_port_number, 2315 port->actor_port_number,
2315 (link == BOND_LINK_UP) ? "UP" : "DOWN"); 2316 link == BOND_LINK_UP ? "UP" : "DOWN");
2316 /* there is no need to reselect a new aggregator, just signal the 2317 /* there is no need to reselect a new aggregator, just signal the
2317 * state machines to reinitialize 2318 * state machines to reinitialize
2318 */ 2319 */
@@ -2390,17 +2391,16 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
2390 } 2391 }
2391 } 2392 }
2392 2393
2393 if (aggregator) { 2394 if (!aggregator)
2394 ad_info->aggregator_id = aggregator->aggregator_identifier; 2395 return -1;
2395 ad_info->ports = aggregator->num_of_ports;
2396 ad_info->actor_key = aggregator->actor_oper_aggregator_key;
2397 ad_info->partner_key = aggregator->partner_oper_aggregator_key;
2398 memcpy(ad_info->partner_system,
2399 aggregator->partner_system.mac_addr_value, ETH_ALEN);
2400 return 0;
2401 }
2402 2396
2403 return -1; 2397 ad_info->aggregator_id = aggregator->aggregator_identifier;
2398 ad_info->ports = aggregator->num_of_ports;
2399 ad_info->actor_key = aggregator->actor_oper_aggregator_key;
2400 ad_info->partner_key = aggregator->partner_oper_aggregator_key;
2401 ether_addr_copy(ad_info->partner_system,
2402 aggregator->partner_system.mac_addr_value);
2403 return 0;
2404} 2404}
2405 2405
2406/* Wrapper used to hold bond->lock so no slave manipulation can occur */ 2406/* Wrapper used to hold bond->lock so no slave manipulation can occur */
@@ -2479,7 +2479,7 @@ out:
2479 return NETDEV_TX_OK; 2479 return NETDEV_TX_OK;
2480err_free: 2480err_free:
2481 /* no suitable interface, frame not sent */ 2481 /* no suitable interface, frame not sent */
2482 kfree_skb(skb); 2482 dev_kfree_skb_any(skb);
2483 goto out; 2483 goto out;
2484} 2484}
2485 2485
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f4dd9592ac62..bb03b1df2f3e 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -28,7 +28,7 @@
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/if_ether.h> 29#include <linux/if_ether.h>
30 30
31// General definitions 31/* General definitions */
32#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) 32#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
33#define AD_TIMER_INTERVAL 100 /*msec*/ 33#define AD_TIMER_INTERVAL 100 /*msec*/
34 34
@@ -47,54 +47,54 @@ enum {
47 BOND_AD_COUNT = 2, 47 BOND_AD_COUNT = 2,
48}; 48};
49 49
50// rx machine states(43.4.11 in the 802.3ad standard) 50/* rx machine states(43.4.11 in the 802.3ad standard) */
51typedef enum { 51typedef enum {
52 AD_RX_DUMMY, 52 AD_RX_DUMMY,
53 AD_RX_INITIALIZE, // rx Machine 53 AD_RX_INITIALIZE, /* rx Machine */
54 AD_RX_PORT_DISABLED, // rx Machine 54 AD_RX_PORT_DISABLED, /* rx Machine */
55 AD_RX_LACP_DISABLED, // rx Machine 55 AD_RX_LACP_DISABLED, /* rx Machine */
56 AD_RX_EXPIRED, // rx Machine 56 AD_RX_EXPIRED, /* rx Machine */
57 AD_RX_DEFAULTED, // rx Machine 57 AD_RX_DEFAULTED, /* rx Machine */
58 AD_RX_CURRENT // rx Machine 58 AD_RX_CURRENT /* rx Machine */
59} rx_states_t; 59} rx_states_t;
60 60
61// periodic machine states(43.4.12 in the 802.3ad standard) 61/* periodic machine states(43.4.12 in the 802.3ad standard) */
62typedef enum { 62typedef enum {
63 AD_PERIODIC_DUMMY, 63 AD_PERIODIC_DUMMY,
64 AD_NO_PERIODIC, // periodic machine 64 AD_NO_PERIODIC, /* periodic machine */
65 AD_FAST_PERIODIC, // periodic machine 65 AD_FAST_PERIODIC, /* periodic machine */
66 AD_SLOW_PERIODIC, // periodic machine 66 AD_SLOW_PERIODIC, /* periodic machine */
67 AD_PERIODIC_TX // periodic machine 67 AD_PERIODIC_TX /* periodic machine */
68} periodic_states_t; 68} periodic_states_t;
69 69
70// mux machine states(43.4.13 in the 802.3ad standard) 70/* mux machine states(43.4.13 in the 802.3ad standard) */
71typedef enum { 71typedef enum {
72 AD_MUX_DUMMY, 72 AD_MUX_DUMMY,
73 AD_MUX_DETACHED, // mux machine 73 AD_MUX_DETACHED, /* mux machine */
74 AD_MUX_WAITING, // mux machine 74 AD_MUX_WAITING, /* mux machine */
75 AD_MUX_ATTACHED, // mux machine 75 AD_MUX_ATTACHED, /* mux machine */
76 AD_MUX_COLLECTING_DISTRIBUTING // mux machine 76 AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
77} mux_states_t; 77} mux_states_t;
78 78
79// tx machine states(43.4.15 in the 802.3ad standard) 79/* tx machine states(43.4.15 in the 802.3ad standard) */
80typedef enum { 80typedef enum {
81 AD_TX_DUMMY, 81 AD_TX_DUMMY,
82 AD_TRANSMIT // tx Machine 82 AD_TRANSMIT /* tx Machine */
83} tx_states_t; 83} tx_states_t;
84 84
85// rx indication types 85/* rx indication types */
86typedef enum { 86typedef enum {
87 AD_TYPE_LACPDU = 1, // type lacpdu 87 AD_TYPE_LACPDU = 1, /* type lacpdu */
88 AD_TYPE_MARKER // type marker 88 AD_TYPE_MARKER /* type marker */
89} pdu_type_t; 89} pdu_type_t;
90 90
91// rx marker indication types 91/* rx marker indication types */
92typedef enum { 92typedef enum {
93 AD_MARKER_INFORMATION_SUBTYPE = 1, // marker imformation subtype 93 AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker imformation subtype */
94 AD_MARKER_RESPONSE_SUBTYPE // marker response subtype 94 AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
95} bond_marker_subtype_t; 95} bond_marker_subtype_t;
96 96
97// timers types(43.4.9 in the 802.3ad standard) 97/* timers types(43.4.9 in the 802.3ad standard) */
98typedef enum { 98typedef enum {
99 AD_CURRENT_WHILE_TIMER, 99 AD_CURRENT_WHILE_TIMER,
100 AD_ACTOR_CHURN_TIMER, 100 AD_ACTOR_CHURN_TIMER,
@@ -105,35 +105,35 @@ typedef enum {
105 105
106#pragma pack(1) 106#pragma pack(1)
107 107
108// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) 108/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
109typedef struct lacpdu { 109typedef struct lacpdu {
110 u8 subtype; // = LACP(= 0x01) 110 u8 subtype; /* = LACP(= 0x01) */
111 u8 version_number; 111 u8 version_number;
112 u8 tlv_type_actor_info; // = actor information(type/length/value) 112 u8 tlv_type_actor_info; /* = actor information(type/length/value) */
113 u8 actor_information_length; // = 20 113 u8 actor_information_length; /* = 20 */
114 __be16 actor_system_priority; 114 __be16 actor_system_priority;
115 struct mac_addr actor_system; 115 struct mac_addr actor_system;
116 __be16 actor_key; 116 __be16 actor_key;
117 __be16 actor_port_priority; 117 __be16 actor_port_priority;
118 __be16 actor_port; 118 __be16 actor_port;
119 u8 actor_state; 119 u8 actor_state;
120 u8 reserved_3_1[3]; // = 0 120 u8 reserved_3_1[3]; /* = 0 */
121 u8 tlv_type_partner_info; // = partner information 121 u8 tlv_type_partner_info; /* = partner information */
122 u8 partner_information_length; // = 20 122 u8 partner_information_length; /* = 20 */
123 __be16 partner_system_priority; 123 __be16 partner_system_priority;
124 struct mac_addr partner_system; 124 struct mac_addr partner_system;
125 __be16 partner_key; 125 __be16 partner_key;
126 __be16 partner_port_priority; 126 __be16 partner_port_priority;
127 __be16 partner_port; 127 __be16 partner_port;
128 u8 partner_state; 128 u8 partner_state;
129 u8 reserved_3_2[3]; // = 0 129 u8 reserved_3_2[3]; /* = 0 */
130 u8 tlv_type_collector_info; // = collector information 130 u8 tlv_type_collector_info; /* = collector information */
131 u8 collector_information_length; // = 16 131 u8 collector_information_length;/* = 16 */
132 __be16 collector_max_delay; 132 __be16 collector_max_delay;
133 u8 reserved_12[12]; 133 u8 reserved_12[12];
134 u8 tlv_type_terminator; // = terminator 134 u8 tlv_type_terminator; /* = terminator */
135 u8 terminator_length; // = 0 135 u8 terminator_length; /* = 0 */
136 u8 reserved_50[50]; // = 0 136 u8 reserved_50[50]; /* = 0 */
137} __packed lacpdu_t; 137} __packed lacpdu_t;
138 138
139typedef struct lacpdu_header { 139typedef struct lacpdu_header {
@@ -141,20 +141,20 @@ typedef struct lacpdu_header {
141 struct lacpdu lacpdu; 141 struct lacpdu lacpdu;
142} __packed lacpdu_header_t; 142} __packed lacpdu_header_t;
143 143
144// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) 144/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
145typedef struct bond_marker { 145typedef struct bond_marker {
146 u8 subtype; // = 0x02 (marker PDU) 146 u8 subtype; /* = 0x02 (marker PDU) */
147 u8 version_number; // = 0x01 147 u8 version_number; /* = 0x01 */
148 u8 tlv_type; // = 0x01 (marker information) 148 u8 tlv_type; /* = 0x01 (marker information) */
149 // = 0x02 (marker response information) 149 /* = 0x02 (marker response information) */
150 u8 marker_length; // = 0x16 150 u8 marker_length; /* = 0x16 */
151 u16 requester_port; // The number assigned to the port by the requester 151 u16 requester_port; /* The number assigned to the port by the requester */
152 struct mac_addr requester_system; // The requester's system id 152 struct mac_addr requester_system; /* The requester's system id */
153 u32 requester_transaction_id; // The transaction id allocated by the requester, 153 u32 requester_transaction_id; /* The transaction id allocated by the requester, */
154 u16 pad; // = 0 154 u16 pad; /* = 0 */
155 u8 tlv_type_terminator; // = 0x00 155 u8 tlv_type_terminator; /* = 0x00 */
156 u8 terminator_length; // = 0x00 156 u8 terminator_length; /* = 0x00 */
157 u8 reserved_90[90]; // = 0 157 u8 reserved_90[90]; /* = 0 */
158} __packed bond_marker_t; 158} __packed bond_marker_t;
159 159
160typedef struct bond_marker_header { 160typedef struct bond_marker_header {
@@ -173,7 +173,7 @@ struct port;
173#pragma pack(8) 173#pragma pack(8)
174#endif 174#endif
175 175
176// aggregator structure(43.4.5 in the 802.3ad standard) 176/* aggregator structure(43.4.5 in the 802.3ad standard) */
177typedef struct aggregator { 177typedef struct aggregator {
178 struct mac_addr aggregator_mac_address; 178 struct mac_addr aggregator_mac_address;
179 u16 aggregator_identifier; 179 u16 aggregator_identifier;
@@ -183,12 +183,12 @@ typedef struct aggregator {
183 struct mac_addr partner_system; 183 struct mac_addr partner_system;
184 u16 partner_system_priority; 184 u16 partner_system_priority;
185 u16 partner_oper_aggregator_key; 185 u16 partner_oper_aggregator_key;
186 u16 receive_state; // BOOLEAN 186 u16 receive_state; /* BOOLEAN */
187 u16 transmit_state; // BOOLEAN 187 u16 transmit_state; /* BOOLEAN */
188 struct port *lag_ports; 188 struct port *lag_ports;
189 // ****** PRIVATE PARAMETERS ****** 189 /* ****** PRIVATE PARAMETERS ****** */
190 struct slave *slave; // pointer to the bond slave that this aggregator belongs to 190 struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
191 u16 is_active; // BOOLEAN. Indicates if this aggregator is active 191 u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
192 u16 num_of_ports; 192 u16 num_of_ports;
193} aggregator_t; 193} aggregator_t;
194 194
@@ -201,12 +201,12 @@ struct port_params {
201 u16 port_state; 201 u16 port_state;
202}; 202};
203 203
204// port structure(43.4.6 in the 802.3ad standard) 204/* port structure(43.4.6 in the 802.3ad standard) */
205typedef struct port { 205typedef struct port {
206 u16 actor_port_number; 206 u16 actor_port_number;
207 u16 actor_port_priority; 207 u16 actor_port_priority;
208 struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification 208 struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
209 u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification 209 u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
210 u16 actor_port_aggregator_identifier; 210 u16 actor_port_aggregator_identifier;
211 bool ntt; 211 bool ntt;
212 u16 actor_admin_port_key; 212 u16 actor_admin_port_key;
@@ -219,24 +219,24 @@ typedef struct port {
219 219
220 bool is_enabled; 220 bool is_enabled;
221 221
222 // ****** PRIVATE PARAMETERS ****** 222 /* ****** PRIVATE PARAMETERS ****** */
223 u16 sm_vars; // all state machines variables for this port 223 u16 sm_vars; /* all state machines variables for this port */
224 rx_states_t sm_rx_state; // state machine rx state 224 rx_states_t sm_rx_state; /* state machine rx state */
225 u16 sm_rx_timer_counter; // state machine rx timer counter 225 u16 sm_rx_timer_counter; /* state machine rx timer counter */
226 periodic_states_t sm_periodic_state;// state machine periodic state 226 periodic_states_t sm_periodic_state; /* state machine periodic state */
227 u16 sm_periodic_timer_counter; // state machine periodic timer counter 227 u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
228 mux_states_t sm_mux_state; // state machine mux state 228 mux_states_t sm_mux_state; /* state machine mux state */
229 u16 sm_mux_timer_counter; // state machine mux timer counter 229 u16 sm_mux_timer_counter; /* state machine mux timer counter */
230 tx_states_t sm_tx_state; // state machine tx state 230 tx_states_t sm_tx_state; /* state machine tx state */
231 u16 sm_tx_timer_counter; // state machine tx timer counter(allways on - enter to transmit state 3 time per second) 231 u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
232 struct slave *slave; // pointer to the bond slave that this port belongs to 232 struct slave *slave; /* pointer to the bond slave that this port belongs to */
233 struct aggregator *aggregator; // pointer to an aggregator that this port related to 233 struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
234 struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator 234 struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
235 u32 transaction_id; // continuous number for identification of Marker PDU's; 235 u32 transaction_id; /* continuous number for identification of Marker PDU's; */
236 struct lacpdu lacpdu; // the lacpdu that will be sent for this port 236 struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
237} port_t; 237} port_t;
238 238
239// system structure 239/* system structure */
240struct ad_system { 240struct ad_system {
241 u16 sys_priority; 241 u16 sys_priority;
242 struct mac_addr sys_mac_addr; 242 struct mac_addr sys_mac_addr;
@@ -246,27 +246,26 @@ struct ad_system {
246#pragma pack() 246#pragma pack()
247#endif 247#endif
248 248
249// ================= AD Exported structures to the main bonding code ================== 249/* ========== AD Exported structures to the main bonding code ========== */
250#define BOND_AD_INFO(bond) ((bond)->ad_info) 250#define BOND_AD_INFO(bond) ((bond)->ad_info)
251#define SLAVE_AD_INFO(slave) ((slave)->ad_info) 251#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
252 252
253struct ad_bond_info { 253struct ad_bond_info {
254 struct ad_system system; /* 802.3ad system structure */ 254 struct ad_system system; /* 802.3ad system structure */
255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes 255 u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
256 u16 aggregator_identifier; 256 u16 aggregator_identifier;
257}; 257};
258 258
259struct ad_slave_info { 259struct ad_slave_info {
260 struct aggregator aggregator; // 802.3ad aggregator structure 260 struct aggregator aggregator; /* 802.3ad aggregator structure */
261 struct port port; // 802.3ad port structure 261 struct port port; /* 802.3ad port structure */
262 spinlock_t state_machine_lock; /* mutex state machines vs. 262 spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */
263 incoming LACPDU */
264 u16 id; 263 u16 id;
265}; 264};
266 265
267// ================= AD Exported functions to the main bonding code ================== 266/* ========== AD Exported functions to the main bonding code ========== */
268void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution); 267void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
269void bond_3ad_bind_slave(struct slave *slave); 268void bond_3ad_bind_slave(struct slave *slave);
270void bond_3ad_unbind_slave(struct slave *slave); 269void bond_3ad_unbind_slave(struct slave *slave);
271void bond_3ad_state_machine_handler(struct work_struct *); 270void bond_3ad_state_machine_handler(struct work_struct *);
272void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout); 271void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
@@ -281,5 +280,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
281 struct slave *slave); 280 struct slave *slave);
282int bond_3ad_set_carrier(struct bonding *bond); 281int bond_3ad_set_carrier(struct bonding *bond);
283void bond_3ad_update_lacp_rate(struct bonding *bond); 282void bond_3ad_update_lacp_rate(struct bonding *bond);
284#endif //__BOND_3AD_H__ 283#endif /* __BOND_3AD_H__ */
285 284
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e8f133e926aa..9f69e818b000 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -93,9 +93,8 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
93 int i; 93 int i;
94 u8 hash = 0; 94 u8 hash = 0;
95 95
96 for (i = 0; i < hash_size; i++) { 96 for (i = 0; i < hash_size; i++)
97 hash ^= hash_start[i]; 97 hash ^= hash_start[i];
98 }
99 98
100 return hash; 99 return hash;
101} 100}
@@ -190,9 +189,8 @@ static int tlb_initialize(struct bonding *bond)
190 189
191 bond_info->tx_hashtbl = new_hashtbl; 190 bond_info->tx_hashtbl = new_hashtbl;
192 191
193 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { 192 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
194 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); 193 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
195 }
196 194
197 _unlock_tx_hashtbl_bh(bond); 195 _unlock_tx_hashtbl_bh(bond);
198 196
@@ -264,9 +262,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
264 hash_table[hash_index].next = next_index; 262 hash_table[hash_index].next = next_index;
265 hash_table[hash_index].prev = TLB_NULL_INDEX; 263 hash_table[hash_index].prev = TLB_NULL_INDEX;
266 264
267 if (next_index != TLB_NULL_INDEX) { 265 if (next_index != TLB_NULL_INDEX)
268 hash_table[next_index].prev = hash_index; 266 hash_table[next_index].prev = hash_index;
269 }
270 267
271 slave_info->head = hash_index; 268 slave_info->head = hash_index;
272 slave_info->load += 269 slave_info->load +=
@@ -274,9 +271,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
274 } 271 }
275 } 272 }
276 273
277 if (assigned_slave) { 274 if (assigned_slave)
278 hash_table[hash_index].tx_bytes += skb_len; 275 hash_table[hash_index].tx_bytes += skb_len;
279 }
280 276
281 return assigned_slave; 277 return assigned_slave;
282} 278}
@@ -329,7 +325,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
329 325
330 _lock_rx_hashtbl_bh(bond); 326 _lock_rx_hashtbl_bh(bond);
331 327
332 hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src)); 328 hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
333 client_info = &(bond_info->rx_hashtbl[hash_index]); 329 client_info = &(bond_info->rx_hashtbl[hash_index]);
334 330
335 if ((client_info->assigned) && 331 if ((client_info->assigned) &&
@@ -337,7 +333,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
337 (client_info->ip_dst == arp->ip_src) && 333 (client_info->ip_dst == arp->ip_src) &&
338 (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) { 334 (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
339 /* update the clients MAC address */ 335 /* update the clients MAC address */
340 memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN); 336 ether_addr_copy(client_info->mac_dst, arp->mac_src);
341 client_info->ntt = 1; 337 client_info->ntt = 1;
342 bond_info->rx_ntt = 1; 338 bond_info->rx_ntt = 1;
343 } 339 }
@@ -451,9 +447,8 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
451 */ 447 */
452static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) 448static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
453{ 449{
454 if (!bond->curr_active_slave) { 450 if (!bond->curr_active_slave)
455 return; 451 return;
456 }
457 452
458 if (!bond->alb_info.primary_is_promisc) { 453 if (!bond->alb_info.primary_is_promisc) {
459 if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1)) 454 if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
@@ -513,9 +508,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
513 508
514 write_lock_bh(&bond->curr_slave_lock); 509 write_lock_bh(&bond->curr_slave_lock);
515 510
516 if (slave != bond->curr_active_slave) { 511 if (slave != bond->curr_active_slave)
517 rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); 512 rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
518 }
519 513
520 write_unlock_bh(&bond->curr_slave_lock); 514 write_unlock_bh(&bond->curr_slave_lock);
521} 515}
@@ -524,9 +518,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
524{ 518{
525 int i; 519 int i;
526 520
527 if (!client_info->slave) { 521 if (!client_info->slave)
528 return; 522 return;
529 }
530 523
531 for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { 524 for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
532 struct sk_buff *skb; 525 struct sk_buff *skb;
@@ -574,9 +567,8 @@ static void rlb_update_rx_clients(struct bonding *bond)
574 client_info = &(bond_info->rx_hashtbl[hash_index]); 567 client_info = &(bond_info->rx_hashtbl[hash_index]);
575 if (client_info->ntt) { 568 if (client_info->ntt) {
576 rlb_update_client(client_info); 569 rlb_update_client(client_info);
577 if (bond_info->rlb_update_retry_counter == 0) { 570 if (bond_info->rlb_update_retry_counter == 0)
578 client_info->ntt = 0; 571 client_info->ntt = 0;
579 }
580 } 572 }
581 } 573 }
582 574
@@ -610,10 +602,10 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
610 } 602 }
611 } 603 }
612 604
613 // update the team's flag only after the whole iteration 605 /* update the team's flag only after the whole iteration */
614 if (ntt) { 606 if (ntt) {
615 bond_info->rx_ntt = 1; 607 bond_info->rx_ntt = 1;
616 //fasten the change 608 /* fasten the change */
617 bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY; 609 bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
618 } 610 }
619 611
@@ -677,9 +669,9 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
677 /* the entry is already assigned to this client */ 669 /* the entry is already assigned to this client */
678 if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) { 670 if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
679 /* update mac address from arp */ 671 /* update mac address from arp */
680 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); 672 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
681 } 673 }
682 memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN); 674 ether_addr_copy(client_info->mac_src, arp->mac_src);
683 675
684 assigned_slave = client_info->slave; 676 assigned_slave = client_info->slave;
685 if (assigned_slave) { 677 if (assigned_slave) {
@@ -719,8 +711,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
719 * will be updated with clients actual unicast mac address 711 * will be updated with clients actual unicast mac address
720 * upon receiving an arp reply. 712 * upon receiving an arp reply.
721 */ 713 */
722 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); 714 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
723 memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN); 715 ether_addr_copy(client_info->mac_src, arp->mac_src);
724 client_info->slave = assigned_slave; 716 client_info->slave = assigned_slave;
725 717
726 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { 718 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -770,9 +762,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
770 * rx channel 762 * rx channel
771 */ 763 */
772 tx_slave = rlb_choose_channel(skb, bond); 764 tx_slave = rlb_choose_channel(skb, bond);
773 if (tx_slave) { 765 if (tx_slave)
774 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN); 766 ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
775 }
776 pr_debug("Server sent ARP Reply packet\n"); 767 pr_debug("Server sent ARP Reply packet\n");
777 } else if (arp->op_code == htons(ARPOP_REQUEST)) { 768 } else if (arp->op_code == htons(ARPOP_REQUEST)) {
778 /* Create an entry in the rx_hashtbl for this client as a 769 /* Create an entry in the rx_hashtbl for this client as a
@@ -824,9 +815,8 @@ static void rlb_rebalance(struct bonding *bond)
824 } 815 }
825 816
826 /* update the team's flag only after the whole iteration */ 817 /* update the team's flag only after the whole iteration */
827 if (ntt) { 818 if (ntt)
828 bond_info->rx_ntt = 1; 819 bond_info->rx_ntt = 1;
829 }
830 _unlock_rx_hashtbl_bh(bond); 820 _unlock_rx_hashtbl_bh(bond);
831} 821}
832 822
@@ -923,7 +913,7 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
923static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) 913static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
924{ 914{
925 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 915 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
926 u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src)); 916 u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
927 u32 index; 917 u32 index;
928 918
929 _lock_rx_hashtbl_bh(bond); 919 _lock_rx_hashtbl_bh(bond);
@@ -957,9 +947,8 @@ static int rlb_initialize(struct bonding *bond)
957 947
958 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; 948 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
959 949
960 for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) { 950 for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
961 rlb_init_table_entry(bond_info->rx_hashtbl + i); 951 rlb_init_table_entry(bond_info->rx_hashtbl + i);
962 }
963 952
964 _unlock_rx_hashtbl_bh(bond); 953 _unlock_rx_hashtbl_bh(bond);
965 954
@@ -1014,9 +1003,9 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1014 char *data; 1003 char *data;
1015 1004
1016 memset(&pkt, 0, size); 1005 memset(&pkt, 0, size);
1017 memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); 1006 ether_addr_copy(pkt.mac_dst, mac_addr);
1018 memcpy(pkt.mac_src, mac_addr, ETH_ALEN); 1007 ether_addr_copy(pkt.mac_src, mac_addr);
1019 pkt.type = cpu_to_be16(ETH_P_LOOP); 1008 pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
1020 1009
1021 skb = dev_alloc_skb(size); 1010 skb = dev_alloc_skb(size);
1022 if (!skb) 1011 if (!skb)
@@ -1097,7 +1086,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
1097{ 1086{
1098 u8 tmp_mac_addr[ETH_ALEN]; 1087 u8 tmp_mac_addr[ETH_ALEN];
1099 1088
1100 memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN); 1089 ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
1101 alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr); 1090 alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
1102 alb_set_slave_mac_addr(slave2, tmp_mac_addr); 1091 alb_set_slave_mac_addr(slave2, tmp_mac_addr);
1103 1092
@@ -1254,9 +1243,9 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1254 if (free_mac_slave) { 1243 if (free_mac_slave) {
1255 alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr); 1244 alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
1256 1245
1257 pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n", 1246 pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
1258 bond->dev->name, slave->dev->name, 1247 bond->dev->name, slave->dev->name,
1259 free_mac_slave->dev->name); 1248 free_mac_slave->dev->name);
1260 1249
1261 } else if (has_bond_addr) { 1250 } else if (has_bond_addr) {
1262 pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n", 1251 pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
@@ -1294,12 +1283,12 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
1294 1283
1295 bond_for_each_slave(bond, slave, iter) { 1284 bond_for_each_slave(bond, slave, iter) {
1296 /* save net_device's current hw address */ 1285 /* save net_device's current hw address */
1297 memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); 1286 ether_addr_copy(tmp_addr, slave->dev->dev_addr);
1298 1287
1299 res = dev_set_mac_address(slave->dev, addr); 1288 res = dev_set_mac_address(slave->dev, addr);
1300 1289
1301 /* restore net_device's hw address */ 1290 /* restore net_device's hw address */
1302 memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN); 1291 ether_addr_copy(slave->dev->dev_addr, tmp_addr);
1303 1292
1304 if (res) 1293 if (res)
1305 goto unwind; 1294 goto unwind;
@@ -1315,9 +1304,9 @@ unwind:
1315 bond_for_each_slave(bond, rollback_slave, iter) { 1304 bond_for_each_slave(bond, rollback_slave, iter) {
1316 if (rollback_slave == slave) 1305 if (rollback_slave == slave)
1317 break; 1306 break;
1318 memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN); 1307 ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
1319 dev_set_mac_address(rollback_slave->dev, &sa); 1308 dev_set_mac_address(rollback_slave->dev, &sa);
1320 memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN); 1309 ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
1321 } 1310 }
1322 1311
1323 return res; 1312 return res;
@@ -1330,9 +1319,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
1330 int res; 1319 int res;
1331 1320
1332 res = tlb_initialize(bond); 1321 res = tlb_initialize(bond);
1333 if (res) { 1322 if (res)
1334 return res; 1323 return res;
1335 }
1336 1324
1337 if (rlb_enabled) { 1325 if (rlb_enabled) {
1338 bond->alb_info.rlb_enabled = 1; 1326 bond->alb_info.rlb_enabled = 1;
@@ -1355,9 +1343,8 @@ void bond_alb_deinitialize(struct bonding *bond)
1355 1343
1356 tlb_deinitialize(bond); 1344 tlb_deinitialize(bond);
1357 1345
1358 if (bond_info->rlb_enabled) { 1346 if (bond_info->rlb_enabled)
1359 rlb_deinitialize(bond); 1347 rlb_deinitialize(bond);
1360 }
1361} 1348}
1362 1349
1363int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) 1350int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -1436,14 +1423,13 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1436 break; 1423 break;
1437 } 1424 }
1438 1425
1439 hash_start = (char*)eth_data->h_dest; 1426 hash_start = (char *)eth_data->h_dest;
1440 hash_size = ETH_ALEN; 1427 hash_size = ETH_ALEN;
1441 break; 1428 break;
1442 case ETH_P_ARP: 1429 case ETH_P_ARP:
1443 do_tx_balance = 0; 1430 do_tx_balance = 0;
1444 if (bond_info->rlb_enabled) { 1431 if (bond_info->rlb_enabled)
1445 tx_slave = rlb_arp_xmit(skb, bond); 1432 tx_slave = rlb_arp_xmit(skb, bond);
1446 }
1447 break; 1433 break;
1448 default: 1434 default:
1449 do_tx_balance = 0; 1435 do_tx_balance = 0;
@@ -1463,23 +1449,22 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1463 1449
1464 if (tx_slave && SLAVE_IS_OK(tx_slave)) { 1450 if (tx_slave && SLAVE_IS_OK(tx_slave)) {
1465 if (tx_slave != rcu_dereference(bond->curr_active_slave)) { 1451 if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
1466 memcpy(eth_data->h_source, 1452 ether_addr_copy(eth_data->h_source,
1467 tx_slave->dev->dev_addr, 1453 tx_slave->dev->dev_addr);
1468 ETH_ALEN);
1469 } 1454 }
1470 1455
1471 bond_dev_queue_xmit(bond, skb, tx_slave->dev); 1456 bond_dev_queue_xmit(bond, skb, tx_slave->dev);
1472 goto out; 1457 goto out;
1473 } else { 1458 }
1474 if (tx_slave) { 1459
1475 _lock_tx_hashtbl(bond); 1460 if (tx_slave) {
1476 __tlb_clear_slave(bond, tx_slave, 0); 1461 _lock_tx_hashtbl(bond);
1477 _unlock_tx_hashtbl(bond); 1462 __tlb_clear_slave(bond, tx_slave, 0);
1478 } 1463 _unlock_tx_hashtbl(bond);
1479 } 1464 }
1480 1465
1481 /* no suitable interface, frame not sent */ 1466 /* no suitable interface, frame not sent */
1482 kfree_skb(skb); 1467 dev_kfree_skb_any(skb);
1483out: 1468out:
1484 return NETDEV_TX_OK; 1469 return NETDEV_TX_OK;
1485} 1470}
@@ -1577,11 +1562,10 @@ void bond_alb_monitor(struct work_struct *work)
1577 --bond_info->rlb_update_delay_counter; 1562 --bond_info->rlb_update_delay_counter;
1578 } else { 1563 } else {
1579 rlb_update_rx_clients(bond); 1564 rlb_update_rx_clients(bond);
1580 if (bond_info->rlb_update_retry_counter) { 1565 if (bond_info->rlb_update_retry_counter)
1581 --bond_info->rlb_update_retry_counter; 1566 --bond_info->rlb_update_retry_counter;
1582 } else { 1567 else
1583 bond_info->rx_ntt = 0; 1568 bond_info->rx_ntt = 0;
1584 }
1585 } 1569 }
1586 } 1570 }
1587 } 1571 }
@@ -1598,23 +1582,20 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
1598 int res; 1582 int res;
1599 1583
1600 res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr); 1584 res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
1601 if (res) { 1585 if (res)
1602 return res; 1586 return res;
1603 }
1604 1587
1605 res = alb_handle_addr_collision_on_attach(bond, slave); 1588 res = alb_handle_addr_collision_on_attach(bond, slave);
1606 if (res) { 1589 if (res)
1607 return res; 1590 return res;
1608 }
1609 1591
1610 tlb_init_slave(slave); 1592 tlb_init_slave(slave);
1611 1593
1612 /* order a rebalance ASAP */ 1594 /* order a rebalance ASAP */
1613 bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; 1595 bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
1614 1596
1615 if (bond->alb_info.rlb_enabled) { 1597 if (bond->alb_info.rlb_enabled)
1616 bond->alb_info.rlb_rebalance = 1; 1598 bond->alb_info.rlb_rebalance = 1;
1617 }
1618 1599
1619 return 0; 1600 return 0;
1620} 1601}
@@ -1645,9 +1626,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
1645 1626
1646 if (link == BOND_LINK_DOWN) { 1627 if (link == BOND_LINK_DOWN) {
1647 tlb_clear_slave(bond, slave, 0); 1628 tlb_clear_slave(bond, slave, 0);
1648 if (bond->alb_info.rlb_enabled) { 1629 if (bond->alb_info.rlb_enabled)
1649 rlb_clear_slave(bond, slave); 1630 rlb_clear_slave(bond, slave);
1650 }
1651 } else if (link == BOND_LINK_UP) { 1631 } else if (link == BOND_LINK_UP) {
1652 /* order a rebalance ASAP */ 1632 /* order a rebalance ASAP */
1653 bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; 1633 bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
@@ -1723,14 +1703,14 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1723 struct sockaddr sa; 1703 struct sockaddr sa;
1724 u8 tmp_addr[ETH_ALEN]; 1704 u8 tmp_addr[ETH_ALEN];
1725 1705
1726 memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN); 1706 ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
1727 1707
1728 memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len); 1708 memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
1729 sa.sa_family = bond->dev->type; 1709 sa.sa_family = bond->dev->type;
1730 /* we don't care if it can't change its mac, best effort */ 1710 /* we don't care if it can't change its mac, best effort */
1731 dev_set_mac_address(new_slave->dev, &sa); 1711 dev_set_mac_address(new_slave->dev, &sa);
1732 1712
1733 memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN); 1713 ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
1734 } 1714 }
1735 1715
1736 /* curr_active_slave must be set before calling alb_swap_mac_addr */ 1716 /* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1759,14 +1739,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1759 struct slave *swap_slave; 1739 struct slave *swap_slave;
1760 int res; 1740 int res;
1761 1741
1762 if (!is_valid_ether_addr(sa->sa_data)) { 1742 if (!is_valid_ether_addr(sa->sa_data))
1763 return -EADDRNOTAVAIL; 1743 return -EADDRNOTAVAIL;
1764 }
1765 1744
1766 res = alb_set_mac_address(bond, addr); 1745 res = alb_set_mac_address(bond, addr);
1767 if (res) { 1746 if (res)
1768 return res; 1747 return res;
1769 }
1770 1748
1771 memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len); 1749 memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
1772 1750
@@ -1774,9 +1752,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1774 * Otherwise we'll need to pass the new address to it and handle 1752 * Otherwise we'll need to pass the new address to it and handle
1775 * duplications. 1753 * duplications.
1776 */ 1754 */
1777 if (!bond->curr_active_slave) { 1755 if (!bond->curr_active_slave)
1778 return 0; 1756 return 0;
1779 }
1780 1757
1781 swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr); 1758 swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
1782 1759
@@ -1800,8 +1777,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1800 1777
1801void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id) 1778void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
1802{ 1779{
1803 if (bond->alb_info.rlb_enabled) { 1780 if (bond->alb_info.rlb_enabled)
1804 rlb_clear_vlan(bond, vlan_id); 1781 rlb_clear_vlan(bond, vlan_id);
1805 }
1806} 1782}
1807 1783
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 5fc4c2351478..2d3f7fa541ff 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -69,7 +69,7 @@ void bond_debug_register(struct bonding *bond)
69 debugfs_create_dir(bond->dev->name, bonding_debug_root); 69 debugfs_create_dir(bond->dev->name, bonding_debug_root);
70 70
71 if (!bond->debug_dir) { 71 if (!bond->debug_dir) {
72 pr_warning("%s: Warning: failed to register to debugfs\n", 72 pr_warn("%s: Warning: failed to register to debugfs\n",
73 bond->dev->name); 73 bond->dev->name);
74 return; 74 return;
75 } 75 }
@@ -98,9 +98,8 @@ void bond_debug_reregister(struct bonding *bond)
98 if (d) { 98 if (d) {
99 bond->debug_dir = d; 99 bond->debug_dir = d;
100 } else { 100 } else {
101 pr_warning("%s: Warning: failed to reregister, " 101 pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
102 "so just unregister old one\n", 102 bond->dev->name);
103 bond->dev->name);
104 bond_debug_unregister(bond); 103 bond_debug_unregister(bond);
105 } 104 }
106} 105}
@@ -110,8 +109,7 @@ void bond_create_debugfs(void)
110 bonding_debug_root = debugfs_create_dir("bonding", NULL); 109 bonding_debug_root = debugfs_create_dir("bonding", NULL);
111 110
112 if (!bonding_debug_root) { 111 if (!bonding_debug_root) {
113 pr_warning("Warning: Cannot create bonding directory" 112 pr_warn("Warning: Cannot create bonding directory in debugfs\n");
114 " in debugfs\n");
115 } 113 }
116} 114}
117 115
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e5628fc725c3..95a6ca7d9e51 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -673,12 +673,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
673 write_unlock_bh(&bond->curr_slave_lock); 673 write_unlock_bh(&bond->curr_slave_lock);
674 674
675 if (old_active) { 675 if (old_active) {
676 memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN); 676 ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
677 memcpy(saddr.sa_data, old_active->dev->dev_addr, 677 ether_addr_copy(saddr.sa_data,
678 ETH_ALEN); 678 old_active->dev->dev_addr);
679 saddr.sa_family = new_active->dev->type; 679 saddr.sa_family = new_active->dev->type;
680 } else { 680 } else {
681 memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN); 681 ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
682 saddr.sa_family = bond->dev->type; 682 saddr.sa_family = bond->dev->type;
683 } 683 }
684 684
@@ -692,7 +692,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
692 if (!old_active) 692 if (!old_active)
693 goto out; 693 goto out;
694 694
695 memcpy(saddr.sa_data, tmp_mac, ETH_ALEN); 695 ether_addr_copy(saddr.sa_data, tmp_mac);
696 saddr.sa_family = old_active->dev->type; 696 saddr.sa_family = old_active->dev->type;
697 697
698 rv = dev_set_mac_address(old_active->dev, &saddr); 698 rv = dev_set_mac_address(old_active->dev, &saddr);
@@ -798,11 +798,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
798 return; 798 return;
799 799
800 if (new_active) { 800 if (new_active) {
801 new_active->jiffies = jiffies; 801 new_active->last_link_up = jiffies;
802 802
803 if (new_active->link == BOND_LINK_BACK) { 803 if (new_active->link == BOND_LINK_BACK) {
804 if (USES_PRIMARY(bond->params.mode)) { 804 if (USES_PRIMARY(bond->params.mode)) {
805 pr_info("%s: making interface %s the new active one %d ms earlier.\n", 805 pr_info("%s: making interface %s the new active one %d ms earlier\n",
806 bond->dev->name, new_active->dev->name, 806 bond->dev->name, new_active->dev->name,
807 (bond->params.updelay - new_active->delay) * bond->params.miimon); 807 (bond->params.updelay - new_active->delay) * bond->params.miimon);
808 } 808 }
@@ -817,7 +817,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
817 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); 817 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
818 } else { 818 } else {
819 if (USES_PRIMARY(bond->params.mode)) { 819 if (USES_PRIMARY(bond->params.mode)) {
820 pr_info("%s: making interface %s the new active one.\n", 820 pr_info("%s: making interface %s the new active one\n",
821 bond->dev->name, new_active->dev->name); 821 bond->dev->name, new_active->dev->name);
822 } 822 }
823 } 823 }
@@ -910,7 +910,7 @@ void bond_select_active_slave(struct bonding *bond)
910 pr_info("%s: first active interface up!\n", 910 pr_info("%s: first active interface up!\n",
911 bond->dev->name); 911 bond->dev->name);
912 } else { 912 } else {
913 pr_info("%s: now running without any active interface !\n", 913 pr_info("%s: now running without any active interface!\n",
914 bond->dev->name); 914 bond->dev->name);
915 } 915 }
916 } 916 }
@@ -922,12 +922,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
922 struct netpoll *np; 922 struct netpoll *np;
923 int err = 0; 923 int err = 0;
924 924
925 np = kzalloc(sizeof(*np), GFP_ATOMIC); 925 np = kzalloc(sizeof(*np), GFP_KERNEL);
926 err = -ENOMEM; 926 err = -ENOMEM;
927 if (!np) 927 if (!np)
928 goto out; 928 goto out;
929 929
930 err = __netpoll_setup(np, slave->dev, GFP_ATOMIC); 930 err = __netpoll_setup(np, slave->dev);
931 if (err) { 931 if (err) {
932 kfree(np); 932 kfree(np);
933 goto out; 933 goto out;
@@ -946,14 +946,6 @@ static inline void slave_disable_netpoll(struct slave *slave)
946 slave->np = NULL; 946 slave->np = NULL;
947 __netpoll_free_async(np); 947 __netpoll_free_async(np);
948} 948}
949static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
950{
951 if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
952 return false;
953 if (!slave_dev->netdev_ops->ndo_poll_controller)
954 return false;
955 return true;
956}
957 949
958static void bond_poll_controller(struct net_device *bond_dev) 950static void bond_poll_controller(struct net_device *bond_dev)
959{ 951{
@@ -970,7 +962,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
970 slave_disable_netpoll(slave); 962 slave_disable_netpoll(slave);
971} 963}
972 964
973static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) 965static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
974{ 966{
975 struct bonding *bond = netdev_priv(dev); 967 struct bonding *bond = netdev_priv(dev);
976 struct list_head *iter; 968 struct list_head *iter;
@@ -1119,9 +1111,6 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1119 slave = bond_slave_get_rcu(skb->dev); 1111 slave = bond_slave_get_rcu(skb->dev);
1120 bond = slave->bond; 1112 bond = slave->bond;
1121 1113
1122 if (bond->params.arp_interval)
1123 slave->dev->last_rx = jiffies;
1124
1125 recv_probe = ACCESS_ONCE(bond->recv_probe); 1114 recv_probe = ACCESS_ONCE(bond->recv_probe);
1126 if (recv_probe) { 1115 if (recv_probe) {
1127 ret = recv_probe(skb, bond, slave); 1116 ret = recv_probe(skb, bond, slave);
@@ -1146,7 +1135,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1146 kfree_skb(skb); 1135 kfree_skb(skb);
1147 return RX_HANDLER_CONSUMED; 1136 return RX_HANDLER_CONSUMED;
1148 } 1137 }
1149 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); 1138 ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
1150 } 1139 }
1151 1140
1152 return ret; 1141 return ret;
@@ -1187,13 +1176,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1187 if (!bond->params.use_carrier && 1176 if (!bond->params.use_carrier &&
1188 slave_dev->ethtool_ops->get_link == NULL && 1177 slave_dev->ethtool_ops->get_link == NULL &&
1189 slave_ops->ndo_do_ioctl == NULL) { 1178 slave_ops->ndo_do_ioctl == NULL) {
1190 pr_warning("%s: Warning: no link monitoring support for %s\n", 1179 pr_warn("%s: Warning: no link monitoring support for %s\n",
1191 bond_dev->name, slave_dev->name); 1180 bond_dev->name, slave_dev->name);
1192 } 1181 }
1193 1182
1194 /* already enslaved */ 1183 /* already enslaved */
1195 if (slave_dev->flags & IFF_SLAVE) { 1184 if (slave_dev->flags & IFF_SLAVE) {
1196 pr_debug("Error, Device was already enslaved\n"); 1185 pr_debug("Error: Device was already enslaved\n");
1197 return -EBUSY; 1186 return -EBUSY;
1198 } 1187 }
1199 1188
@@ -1211,9 +1200,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1211 bond_dev->name, slave_dev->name, bond_dev->name); 1200 bond_dev->name, slave_dev->name, bond_dev->name);
1212 return -EPERM; 1201 return -EPERM;
1213 } else { 1202 } else {
1214 pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", 1203 pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
1215 bond_dev->name, slave_dev->name, 1204 bond_dev->name, slave_dev->name,
1216 slave_dev->name, bond_dev->name); 1205 slave_dev->name, bond_dev->name);
1217 } 1206 }
1218 } else { 1207 } else {
1219 pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1208 pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
@@ -1226,7 +1215,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1226 * enslaving it; the old ifenslave will not. 1215 * enslaving it; the old ifenslave will not.
1227 */ 1216 */
1228 if ((slave_dev->flags & IFF_UP)) { 1217 if ((slave_dev->flags & IFF_UP)) {
1229 pr_err("%s is up. This may be due to an out of date ifenslave.\n", 1218 pr_err("%s is up - this may be due to an out of date ifenslave\n",
1230 slave_dev->name); 1219 slave_dev->name);
1231 res = -EPERM; 1220 res = -EPERM;
1232 goto err_undo_flags; 1221 goto err_undo_flags;
@@ -1270,24 +1259,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1270 bond_dev); 1259 bond_dev);
1271 } 1260 }
1272 } else if (bond_dev->type != slave_dev->type) { 1261 } else if (bond_dev->type != slave_dev->type) {
1273 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", 1262 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
1274 slave_dev->name, 1263 slave_dev->name, slave_dev->type, bond_dev->type);
1275 slave_dev->type, bond_dev->type);
1276 res = -EINVAL; 1264 res = -EINVAL;
1277 goto err_undo_flags; 1265 goto err_undo_flags;
1278 } 1266 }
1279 1267
1280 if (slave_ops->ndo_set_mac_address == NULL) { 1268 if (slave_ops->ndo_set_mac_address == NULL) {
1281 if (!bond_has_slaves(bond)) { 1269 if (!bond_has_slaves(bond)) {
1282 pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n", 1270 pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
1283 bond_dev->name); 1271 bond_dev->name);
1284 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { 1272 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
1285 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1273 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1286 pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n", 1274 pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
1287 bond_dev->name); 1275 bond_dev->name);
1288 } 1276 }
1289 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1277 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1290 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n", 1278 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
1291 bond_dev->name); 1279 bond_dev->name);
1292 res = -EOPNOTSUPP; 1280 res = -EOPNOTSUPP;
1293 goto err_undo_flags; 1281 goto err_undo_flags;
@@ -1326,7 +1314,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1326 * that need it, and for restoring it upon release, and then 1314 * that need it, and for restoring it upon release, and then
1327 * set it to the master's address 1315 * set it to the master's address
1328 */ 1316 */
1329 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); 1317 ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
1330 1318
1331 if (!bond->params.fail_over_mac || 1319 if (!bond->params.fail_over_mac ||
1332 bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 1320 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
@@ -1410,10 +1398,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1410 1398
1411 bond_update_speed_duplex(new_slave); 1399 bond_update_speed_duplex(new_slave);
1412 1400
1413 new_slave->last_arp_rx = jiffies - 1401 new_slave->last_rx = jiffies -
1414 (msecs_to_jiffies(bond->params.arp_interval) + 1); 1402 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1415 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) 1403 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1416 new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx; 1404 new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1417 1405
1418 if (bond->params.miimon && !bond->params.use_carrier) { 1406 if (bond->params.miimon && !bond->params.use_carrier) {
1419 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1407 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1428,12 +1416,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1428 * supported); thus, we don't need to change 1416 * supported); thus, we don't need to change
1429 * the messages for netif_carrier. 1417 * the messages for netif_carrier.
1430 */ 1418 */
1431 pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n", 1419 pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
1432 bond_dev->name, slave_dev->name); 1420 bond_dev->name, slave_dev->name);
1433 } else if (link_reporting == -1) { 1421 } else if (link_reporting == -1) {
1434 /* unable get link status using mii/ethtool */ 1422 /* unable get link status using mii/ethtool */
1435 pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n", 1423 pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
1436 bond_dev->name, slave_dev->name); 1424 bond_dev->name, slave_dev->name);
1437 } 1425 }
1438 } 1426 }
1439 1427
@@ -1457,10 +1445,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1457 } 1445 }
1458 1446
1459 if (new_slave->link != BOND_LINK_DOWN) 1447 if (new_slave->link != BOND_LINK_DOWN)
1460 new_slave->jiffies = jiffies; 1448 new_slave->last_link_up = jiffies;
1461 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n", 1449 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
1462 new_slave->link == BOND_LINK_DOWN ? "DOWN" : 1450 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1463 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); 1451 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1464 1452
1465 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1453 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1466 /* if there is a primary slave, remember it */ 1454 /* if there is a primary slave, remember it */
@@ -1520,9 +1508,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1520 slave_dev->npinfo = bond->dev->npinfo; 1508 slave_dev->npinfo = bond->dev->npinfo;
1521 if (slave_dev->npinfo) { 1509 if (slave_dev->npinfo) {
1522 if (slave_enable_netpoll(new_slave)) { 1510 if (slave_enable_netpoll(new_slave)) {
1523 pr_info("Error, %s: master_dev is using netpoll, " 1511 pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
1524 "but new slave device does not support netpoll.\n", 1512 bond_dev->name);
1525 bond_dev->name);
1526 res = -EBUSY; 1513 res = -EBUSY;
1527 goto err_detach; 1514 goto err_detach;
1528 } 1515 }
@@ -1560,10 +1547,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1560 unblock_netpoll_tx(); 1547 unblock_netpoll_tx();
1561 } 1548 }
1562 1549
1563 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1550 pr_info("%s: Enslaving %s as %s interface with %s link\n",
1564 bond_dev->name, slave_dev->name, 1551 bond_dev->name, slave_dev->name,
1565 bond_is_active_slave(new_slave) ? "n active" : " backup", 1552 bond_is_active_slave(new_slave) ? "an active" : "a backup",
1566 new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); 1553 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
1567 1554
1568 /* enslave is successful */ 1555 /* enslave is successful */
1569 return 0; 1556 return 0;
@@ -1603,7 +1590,7 @@ err_restore_mac:
1603 * MAC if this slave's MAC is in use by the bond, or at 1590 * MAC if this slave's MAC is in use by the bond, or at
1604 * least print a warning. 1591 * least print a warning.
1605 */ 1592 */
1606 memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); 1593 ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
1607 addr.sa_family = slave_dev->type; 1594 addr.sa_family = slave_dev->type;
1608 dev_set_mac_address(slave_dev, &addr); 1595 dev_set_mac_address(slave_dev, &addr);
1609 } 1596 }
@@ -1648,7 +1635,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1648 /* slave is not a slave or master is not master of this slave */ 1635 /* slave is not a slave or master is not master of this slave */
1649 if (!(slave_dev->flags & IFF_SLAVE) || 1636 if (!(slave_dev->flags & IFF_SLAVE) ||
1650 !netdev_has_upper_dev(slave_dev, bond_dev)) { 1637 !netdev_has_upper_dev(slave_dev, bond_dev)) {
1651 pr_err("%s: Error: cannot release %s.\n", 1638 pr_err("%s: Error: cannot release %s\n",
1652 bond_dev->name, slave_dev->name); 1639 bond_dev->name, slave_dev->name);
1653 return -EINVAL; 1640 return -EINVAL;
1654 } 1641 }
@@ -1679,7 +1666,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1679 1666
1680 write_unlock_bh(&bond->lock); 1667 write_unlock_bh(&bond->lock);
1681 1668
1682 pr_info("%s: releasing %s interface %s\n", 1669 pr_info("%s: Releasing %s interface %s\n",
1683 bond_dev->name, 1670 bond_dev->name,
1684 bond_is_active_slave(slave) ? "active" : "backup", 1671 bond_is_active_slave(slave) ? "active" : "backup",
1685 slave_dev->name); 1672 slave_dev->name);
@@ -1692,10 +1679,10 @@ static int __bond_release_one(struct net_device *bond_dev,
1692 bond->params.mode != BOND_MODE_ACTIVEBACKUP)) { 1679 bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
1693 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 1680 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
1694 bond_has_slaves(bond)) 1681 bond_has_slaves(bond))
1695 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", 1682 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
1696 bond_dev->name, slave_dev->name, 1683 bond_dev->name, slave_dev->name,
1697 slave->perm_hwaddr, 1684 slave->perm_hwaddr,
1698 bond_dev->name, slave_dev->name); 1685 bond_dev->name, slave_dev->name);
1699 } 1686 }
1700 1687
1701 if (bond->primary_slave == slave) 1688 if (bond->primary_slave == slave)
@@ -1736,10 +1723,10 @@ static int __bond_release_one(struct net_device *bond_dev,
1736 eth_hw_addr_random(bond_dev); 1723 eth_hw_addr_random(bond_dev);
1737 1724
1738 if (vlan_uses_dev(bond_dev)) { 1725 if (vlan_uses_dev(bond_dev)) {
1739 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 1726 pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
1740 bond_dev->name, bond_dev->name); 1727 bond_dev->name, bond_dev->name);
1741 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", 1728 pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
1742 bond_dev->name); 1729 bond_dev->name);
1743 } 1730 }
1744 } 1731 }
1745 1732
@@ -1755,7 +1742,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1755 bond_compute_features(bond); 1742 bond_compute_features(bond);
1756 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 1743 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
1757 (old_features & NETIF_F_VLAN_CHALLENGED)) 1744 (old_features & NETIF_F_VLAN_CHALLENGED))
1758 pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n", 1745 pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
1759 bond_dev->name, slave_dev->name, bond_dev->name); 1746 bond_dev->name, slave_dev->name, bond_dev->name);
1760 1747
1761 /* must do this from outside any spinlocks */ 1748 /* must do this from outside any spinlocks */
@@ -1790,7 +1777,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1790 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || 1777 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
1791 bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 1778 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1792 /* restore original ("permanent") mac address */ 1779 /* restore original ("permanent") mac address */
1793 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); 1780 ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
1794 addr.sa_family = slave_dev->type; 1781 addr.sa_family = slave_dev->type;
1795 dev_set_mac_address(slave_dev, &addr); 1782 dev_set_mac_address(slave_dev, &addr);
1796 } 1783 }
@@ -1823,7 +1810,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1823 ret = bond_release(bond_dev, slave_dev); 1810 ret = bond_release(bond_dev, slave_dev);
1824 if (ret == 0 && !bond_has_slaves(bond)) { 1811 if (ret == 0 && !bond_has_slaves(bond)) {
1825 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1812 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1826 pr_info("%s: destroying bond %s.\n", 1813 pr_info("%s: Destroying bond %s\n",
1827 bond_dev->name, bond_dev->name); 1814 bond_dev->name, bond_dev->name);
1828 unregister_netdevice(bond_dev); 1815 unregister_netdevice(bond_dev);
1829 } 1816 }
@@ -1837,9 +1824,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
1837 info->bond_mode = bond->params.mode; 1824 info->bond_mode = bond->params.mode;
1838 info->miimon = bond->params.miimon; 1825 info->miimon = bond->params.miimon;
1839 1826
1840 read_lock(&bond->lock);
1841 info->num_slaves = bond->slave_cnt; 1827 info->num_slaves = bond->slave_cnt;
1842 read_unlock(&bond->lock);
1843 1828
1844 return 0; 1829 return 0;
1845} 1830}
@@ -1851,7 +1836,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
1851 int i = 0, res = -ENODEV; 1836 int i = 0, res = -ENODEV;
1852 struct slave *slave; 1837 struct slave *slave;
1853 1838
1854 read_lock(&bond->lock);
1855 bond_for_each_slave(bond, slave, iter) { 1839 bond_for_each_slave(bond, slave, iter) {
1856 if (i++ == (int)info->slave_id) { 1840 if (i++ == (int)info->slave_id) {
1857 res = 0; 1841 res = 0;
@@ -1862,7 +1846,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
1862 break; 1846 break;
1863 } 1847 }
1864 } 1848 }
1865 read_unlock(&bond->lock);
1866 1849
1867 return res; 1850 return res;
1868} 1851}
@@ -1892,7 +1875,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1892 slave->link = BOND_LINK_FAIL; 1875 slave->link = BOND_LINK_FAIL;
1893 slave->delay = bond->params.downdelay; 1876 slave->delay = bond->params.downdelay;
1894 if (slave->delay) { 1877 if (slave->delay) {
1895 pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n", 1878 pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
1896 bond->dev->name, 1879 bond->dev->name,
1897 (bond->params.mode == 1880 (bond->params.mode ==
1898 BOND_MODE_ACTIVEBACKUP) ? 1881 BOND_MODE_ACTIVEBACKUP) ?
@@ -1908,8 +1891,8 @@ static int bond_miimon_inspect(struct bonding *bond)
1908 * recovered before downdelay expired 1891 * recovered before downdelay expired
1909 */ 1892 */
1910 slave->link = BOND_LINK_UP; 1893 slave->link = BOND_LINK_UP;
1911 slave->jiffies = jiffies; 1894 slave->last_link_up = jiffies;
1912 pr_info("%s: link status up again after %d ms for interface %s.\n", 1895 pr_info("%s: link status up again after %d ms for interface %s\n",
1913 bond->dev->name, 1896 bond->dev->name,
1914 (bond->params.downdelay - slave->delay) * 1897 (bond->params.downdelay - slave->delay) *
1915 bond->params.miimon, 1898 bond->params.miimon,
@@ -1934,7 +1917,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1934 slave->delay = bond->params.updelay; 1917 slave->delay = bond->params.updelay;
1935 1918
1936 if (slave->delay) { 1919 if (slave->delay) {
1937 pr_info("%s: link status up for interface %s, enabling it in %d ms.\n", 1920 pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
1938 bond->dev->name, slave->dev->name, 1921 bond->dev->name, slave->dev->name,
1939 ignore_updelay ? 0 : 1922 ignore_updelay ? 0 :
1940 bond->params.updelay * 1923 bond->params.updelay *
@@ -1944,7 +1927,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1944 case BOND_LINK_BACK: 1927 case BOND_LINK_BACK:
1945 if (!link_state) { 1928 if (!link_state) {
1946 slave->link = BOND_LINK_DOWN; 1929 slave->link = BOND_LINK_DOWN;
1947 pr_info("%s: link status down again after %d ms for interface %s.\n", 1930 pr_info("%s: link status down again after %d ms for interface %s\n",
1948 bond->dev->name, 1931 bond->dev->name,
1949 (bond->params.updelay - slave->delay) * 1932 (bond->params.updelay - slave->delay) *
1950 bond->params.miimon, 1933 bond->params.miimon,
@@ -1983,7 +1966,7 @@ static void bond_miimon_commit(struct bonding *bond)
1983 1966
1984 case BOND_LINK_UP: 1967 case BOND_LINK_UP:
1985 slave->link = BOND_LINK_UP; 1968 slave->link = BOND_LINK_UP;
1986 slave->jiffies = jiffies; 1969 slave->last_link_up = jiffies;
1987 1970
1988 if (bond->params.mode == BOND_MODE_8023AD) { 1971 if (bond->params.mode == BOND_MODE_8023AD) {
1989 /* prevent it from being the active one */ 1972 /* prevent it from being the active one */
@@ -1996,7 +1979,7 @@ static void bond_miimon_commit(struct bonding *bond)
1996 bond_set_backup_slave(slave); 1979 bond_set_backup_slave(slave);
1997 } 1980 }
1998 1981
1999 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", 1982 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
2000 bond->dev->name, slave->dev->name, 1983 bond->dev->name, slave->dev->name,
2001 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, 1984 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2002 slave->duplex ? "full" : "half"); 1985 slave->duplex ? "full" : "half");
@@ -2141,24 +2124,40 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2141 * switches in VLAN mode (especially if ports are configured as 2124 * switches in VLAN mode (especially if ports are configured as
2142 * "native" to a VLAN) might not pass non-tagged frames. 2125 * "native" to a VLAN) might not pass non-tagged frames.
2143 */ 2126 */
2144static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id) 2127static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2128 __be32 dest_ip, __be32 src_ip,
2129 struct bond_vlan_tag *inner,
2130 struct bond_vlan_tag *outer)
2145{ 2131{
2146 struct sk_buff *skb; 2132 struct sk_buff *skb;
2147 2133
2148 pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op, 2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
2149 slave_dev->name, &dest_ip, &src_ip, vlan_id); 2135 arp_op, slave_dev->name, &dest_ip, &src_ip);
2150 2136
2151 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, 2137 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2152 NULL, slave_dev->dev_addr, NULL); 2138 NULL, slave_dev->dev_addr, NULL);
2153 2139
2154 if (!skb) { 2140 if (!skb) {
2155 pr_err("ARP packet allocation failed\n"); 2141 net_err_ratelimited("ARP packet allocation failed\n");
2156 return; 2142 return;
2157 } 2143 }
2158 if (vlan_id) { 2144 if (outer->vlan_id) {
2159 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id); 2145 if (inner->vlan_id) {
2146 pr_debug("inner tag: proto %X vid %X\n",
2147 ntohs(inner->vlan_proto), inner->vlan_id);
2148 skb = __vlan_put_tag(skb, inner->vlan_proto,
2149 inner->vlan_id);
2150 if (!skb) {
2151 net_err_ratelimited("failed to insert inner VLAN tag\n");
2152 return;
2153 }
2154 }
2155
2156 pr_debug("outer reg: proto %X vid %X\n",
2157 ntohs(outer->vlan_proto), outer->vlan_id);
2158 skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
2160 if (!skb) { 2159 if (!skb) {
2161 pr_err("failed to insert VLAN tag\n"); 2160 net_err_ratelimited("failed to insert outer VLAN tag\n");
2162 return; 2161 return;
2163 } 2162 }
2164 } 2163 }
@@ -2171,23 +2170,32 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2171 struct net_device *upper, *vlan_upper; 2170 struct net_device *upper, *vlan_upper;
2172 struct list_head *iter, *vlan_iter; 2171 struct list_head *iter, *vlan_iter;
2173 struct rtable *rt; 2172 struct rtable *rt;
2173 struct bond_vlan_tag inner, outer;
2174 __be32 *targets = bond->params.arp_targets, addr; 2174 __be32 *targets = bond->params.arp_targets, addr;
2175 int i, vlan_id; 2175 int i;
2176 2176
2177 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2177 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2178 pr_debug("basa: target %pI4\n", &targets[i]); 2178 pr_debug("basa: target %pI4\n", &targets[i]);
2179 inner.vlan_proto = 0;
2180 inner.vlan_id = 0;
2181 outer.vlan_proto = 0;
2182 outer.vlan_id = 0;
2179 2183
2180 /* Find out through which dev should the packet go */ 2184 /* Find out through which dev should the packet go */
2181 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2185 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2182 RTO_ONLINK, 0); 2186 RTO_ONLINK, 0);
2183 if (IS_ERR(rt)) { 2187 if (IS_ERR(rt)) {
2184 pr_debug("%s: no route to arp_ip_target %pI4\n", 2188 /* there's no route to target - try to send arp
2185 bond->dev->name, &targets[i]); 2189 * probe to generate any traffic (arp_validate=0)
2190 */
2191 if (bond->params.arp_validate)
2192 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2193 bond->dev->name,
2194 &targets[i]);
2195 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
2186 continue; 2196 continue;
2187 } 2197 }
2188 2198
2189 vlan_id = 0;
2190
2191 /* bond device itself */ 2199 /* bond device itself */
2192 if (rt->dst.dev == bond->dev) 2200 if (rt->dst.dev == bond->dev)
2193 goto found; 2201 goto found;
@@ -2197,17 +2205,30 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2197 * found we verify its upper dev list, searching for the 2205 * found we verify its upper dev list, searching for the
2198 * rt->dst.dev. If found we save the tag of the vlan and 2206 * rt->dst.dev. If found we save the tag of the vlan and
2199 * proceed to send the packet. 2207 * proceed to send the packet.
2200 *
2201 * TODO: QinQ?
2202 */ 2208 */
2203 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper, 2209 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
2204 vlan_iter) { 2210 vlan_iter) {
2205 if (!is_vlan_dev(vlan_upper)) 2211 if (!is_vlan_dev(vlan_upper))
2206 continue; 2212 continue;
2213
2214 if (vlan_upper == rt->dst.dev) {
2215 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2216 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2217 rcu_read_unlock();
2218 goto found;
2219 }
2207 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper, 2220 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
2208 iter) { 2221 iter) {
2209 if (upper == rt->dst.dev) { 2222 if (upper == rt->dst.dev) {
2210 vlan_id = vlan_dev_vlan_id(vlan_upper); 2223 /* If the upper dev is a vlan dev too,
2224 * set the vlan tag to inner tag.
2225 */
2226 if (is_vlan_dev(upper)) {
2227 inner.vlan_proto = vlan_dev_vlan_proto(upper);
2228 inner.vlan_id = vlan_dev_vlan_id(upper);
2229 }
2230 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2231 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2211 rcu_read_unlock(); 2232 rcu_read_unlock();
2212 goto found; 2233 goto found;
2213 } 2234 }
@@ -2220,10 +2241,6 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2220 */ 2241 */
2221 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { 2242 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2222 if (upper == rt->dst.dev) { 2243 if (upper == rt->dst.dev) {
2223 /* if it's a vlan - get its VID */
2224 if (is_vlan_dev(upper))
2225 vlan_id = vlan_dev_vlan_id(upper);
2226
2227 rcu_read_unlock(); 2244 rcu_read_unlock();
2228 goto found; 2245 goto found;
2229 } 2246 }
@@ -2242,7 +2259,7 @@ found:
2242 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2259 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2243 ip_rt_put(rt); 2260 ip_rt_put(rt);
2244 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2261 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2245 addr, vlan_id); 2262 addr, &inner, &outer);
2246 } 2263 }
2247} 2264}
2248 2265
@@ -2260,7 +2277,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2260 pr_debug("bva: sip %pI4 not found in targets\n", &sip); 2277 pr_debug("bva: sip %pI4 not found in targets\n", &sip);
2261 return; 2278 return;
2262 } 2279 }
2263 slave->last_arp_rx = jiffies; 2280 slave->last_rx = jiffies;
2264 slave->target_last_arp_rx[i] = jiffies; 2281 slave->target_last_arp_rx[i] = jiffies;
2265} 2282}
2266 2283
@@ -2268,17 +2285,19 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2268 struct slave *slave) 2285 struct slave *slave)
2269{ 2286{
2270 struct arphdr *arp = (struct arphdr *)skb->data; 2287 struct arphdr *arp = (struct arphdr *)skb->data;
2288 struct slave *curr_active_slave;
2271 unsigned char *arp_ptr; 2289 unsigned char *arp_ptr;
2272 __be32 sip, tip; 2290 __be32 sip, tip;
2273 int alen; 2291 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
2274 2292
2275 if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) 2293 if (!slave_do_arp_validate(bond, slave)) {
2294 if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
2295 !slave_do_arp_validate_only(bond, slave))
2296 slave->last_rx = jiffies;
2276 return RX_HANDLER_ANOTHER; 2297 return RX_HANDLER_ANOTHER;
2277 2298 } else if (!is_arp) {
2278 read_lock(&bond->lock); 2299 return RX_HANDLER_ANOTHER;
2279 2300 }
2280 if (!slave_do_arp_validate(bond, slave))
2281 goto out_unlock;
2282 2301
2283 alen = arp_hdr_len(bond->dev); 2302 alen = arp_hdr_len(bond->dev);
2284 2303
@@ -2312,6 +2331,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2312 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 2331 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2313 &sip, &tip); 2332 &sip, &tip);
2314 2333
2334 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2335
2315 /* 2336 /*
2316 * Backup slaves won't see the ARP reply, but do come through 2337 * Backup slaves won't see the ARP reply, but do come through
2317 * here for each ARP probe (so we swap the sip/tip to validate 2338 * here for each ARP probe (so we swap the sip/tip to validate
@@ -2325,15 +2346,15 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2325 * is done to avoid endless looping when we can't reach the 2346 * is done to avoid endless looping when we can't reach the
2326 * arp_ip_target and fool ourselves with our own arp requests. 2347 * arp_ip_target and fool ourselves with our own arp requests.
2327 */ 2348 */
2349
2328 if (bond_is_active_slave(slave)) 2350 if (bond_is_active_slave(slave))
2329 bond_validate_arp(bond, slave, sip, tip); 2351 bond_validate_arp(bond, slave, sip, tip);
2330 else if (bond->curr_active_slave && 2352 else if (curr_active_slave &&
2331 time_after(slave_last_rx(bond, bond->curr_active_slave), 2353 time_after(slave_last_rx(bond, curr_active_slave),
2332 bond->curr_active_slave->jiffies)) 2354 curr_active_slave->last_link_up))
2333 bond_validate_arp(bond, slave, tip, sip); 2355 bond_validate_arp(bond, slave, tip, sip);
2334 2356
2335out_unlock: 2357out_unlock:
2336 read_unlock(&bond->lock);
2337 if (arp != (struct arphdr *)skb->data) 2358 if (arp != (struct arphdr *)skb->data)
2338 kfree(arp); 2359 kfree(arp);
2339 return RX_HANDLER_ANOTHER; 2360 return RX_HANDLER_ANOTHER;
@@ -2376,9 +2397,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2376 oldcurrent = ACCESS_ONCE(bond->curr_active_slave); 2397 oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
2377 /* see if any of the previous devices are up now (i.e. they have 2398 /* see if any of the previous devices are up now (i.e. they have
2378 * xmt and rcv traffic). the curr_active_slave does not come into 2399 * xmt and rcv traffic). the curr_active_slave does not come into
2379 * the picture unless it is null. also, slave->jiffies is not needed 2400 * the picture unless it is null. also, slave->last_link_up is not
2380 * here because we send an arp on each slave and give a slave as 2401 * needed here because we send an arp on each slave and give a slave
2381 * long as it needs to get the tx/rx within the delta. 2402 * as long as it needs to get the tx/rx within the delta.
2382 * TODO: what about up/down delay in arp mode? it wasn't here before 2403 * TODO: what about up/down delay in arp mode? it wasn't here before
2383 * so it can wait 2404 * so it can wait
2384 */ 2405 */
@@ -2387,7 +2408,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2387 2408
2388 if (slave->link != BOND_LINK_UP) { 2409 if (slave->link != BOND_LINK_UP) {
2389 if (bond_time_in_interval(bond, trans_start, 1) && 2410 if (bond_time_in_interval(bond, trans_start, 1) &&
2390 bond_time_in_interval(bond, slave->dev->last_rx, 1)) { 2411 bond_time_in_interval(bond, slave->last_rx, 1)) {
2391 2412
2392 slave->link = BOND_LINK_UP; 2413 slave->link = BOND_LINK_UP;
2393 slave_state_changed = 1; 2414 slave_state_changed = 1;
@@ -2398,7 +2419,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2398 * is closed. 2419 * is closed.
2399 */ 2420 */
2400 if (!oldcurrent) { 2421 if (!oldcurrent) {
2401 pr_info("%s: link status definitely up for interface %s, ", 2422 pr_info("%s: link status definitely up for interface %s\n",
2402 bond->dev->name, 2423 bond->dev->name,
2403 slave->dev->name); 2424 slave->dev->name);
2404 do_failover = 1; 2425 do_failover = 1;
@@ -2416,7 +2437,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2416 * if we don't know our ip yet 2437 * if we don't know our ip yet
2417 */ 2438 */
2418 if (!bond_time_in_interval(bond, trans_start, 2) || 2439 if (!bond_time_in_interval(bond, trans_start, 2) ||
2419 !bond_time_in_interval(bond, slave->dev->last_rx, 2)) { 2440 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2420 2441
2421 slave->link = BOND_LINK_DOWN; 2442 slave->link = BOND_LINK_DOWN;
2422 slave_state_changed = 1; 2443 slave_state_changed = 1;
@@ -2424,9 +2445,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2424 if (slave->link_failure_count < UINT_MAX) 2445 if (slave->link_failure_count < UINT_MAX)
2425 slave->link_failure_count++; 2446 slave->link_failure_count++;
2426 2447
2427 pr_info("%s: interface %s is now down.\n", 2448 pr_info("%s: interface %s is now down\n",
2428 bond->dev->name, 2449 bond->dev->name, slave->dev->name);
2429 slave->dev->name);
2430 2450
2431 if (slave == oldcurrent) 2451 if (slave == oldcurrent)
2432 do_failover = 1; 2452 do_failover = 1;
@@ -2505,7 +2525,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2505 * active. This avoids bouncing, as the last receive 2525 * active. This avoids bouncing, as the last receive
2506 * times need a full ARP monitor cycle to be updated. 2526 * times need a full ARP monitor cycle to be updated.
2507 */ 2527 */
2508 if (bond_time_in_interval(bond, slave->jiffies, 2)) 2528 if (bond_time_in_interval(bond, slave->last_link_up, 2))
2509 continue; 2529 continue;
2510 2530
2511 /* 2531 /*
@@ -2576,7 +2596,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
2576 bond->current_arp_slave = NULL; 2596 bond->current_arp_slave = NULL;
2577 } 2597 }
2578 2598
2579 pr_info("%s: link status definitely up for interface %s.\n", 2599 pr_info("%s: link status definitely up for interface %s\n",
2580 bond->dev->name, slave->dev->name); 2600 bond->dev->name, slave->dev->name);
2581 2601
2582 if (!bond->curr_active_slave || 2602 if (!bond->curr_active_slave ||
@@ -2682,7 +2702,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
2682 bond_set_slave_inactive_flags(slave, 2702 bond_set_slave_inactive_flags(slave,
2683 BOND_SLAVE_NOTIFY_LATER); 2703 BOND_SLAVE_NOTIFY_LATER);
2684 2704
2685 pr_info("%s: backup interface %s is now down.\n", 2705 pr_info("%s: backup interface %s is now down\n",
2686 bond->dev->name, slave->dev->name); 2706 bond->dev->name, slave->dev->name);
2687 } 2707 }
2688 if (slave == curr_arp_slave) 2708 if (slave == curr_arp_slave)
@@ -2698,7 +2718,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
2698 new_slave->link = BOND_LINK_BACK; 2718 new_slave->link = BOND_LINK_BACK;
2699 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); 2719 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
2700 bond_arp_send_all(bond, new_slave); 2720 bond_arp_send_all(bond, new_slave);
2701 new_slave->jiffies = jiffies; 2721 new_slave->last_link_up = jiffies;
2702 rcu_assign_pointer(bond->current_arp_slave, new_slave); 2722 rcu_assign_pointer(bond->current_arp_slave, new_slave);
2703 2723
2704check_state: 2724check_state:
@@ -2879,9 +2899,9 @@ static int bond_slave_netdev_event(unsigned long event,
2879 break; 2899 break;
2880 } 2900 }
2881 2901
2882 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n", 2902 pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
2883 bond->dev->name, bond->primary_slave ? slave_dev->name : 2903 bond->dev->name,
2884 "none"); 2904 bond->primary_slave ? slave_dev->name : "none");
2885 2905
2886 block_netpoll_tx(); 2906 block_netpoll_tx();
2887 write_lock_bh(&bond->curr_slave_lock); 2907 write_lock_bh(&bond->curr_slave_lock);
@@ -2917,8 +2937,7 @@ static int bond_netdev_event(struct notifier_block *this,
2917 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 2937 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2918 2938
2919 pr_debug("event_dev: %s, event: %lx\n", 2939 pr_debug("event_dev: %s, event: %lx\n",
2920 event_dev ? event_dev->name : "None", 2940 event_dev ? event_dev->name : "None", event);
2921 event);
2922 2941
2923 if (!(event_dev->priv_flags & IFF_BONDING)) 2942 if (!(event_dev->priv_flags & IFF_BONDING))
2924 return NOTIFY_DONE; 2943 return NOTIFY_DONE;
@@ -2967,7 +2986,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
2967 fk->ports = 0; 2986 fk->ports = 0;
2968 noff = skb_network_offset(skb); 2987 noff = skb_network_offset(skb);
2969 if (skb->protocol == htons(ETH_P_IP)) { 2988 if (skb->protocol == htons(ETH_P_IP)) {
2970 if (!pskb_may_pull(skb, noff + sizeof(*iph))) 2989 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
2971 return false; 2990 return false;
2972 iph = ip_hdr(skb); 2991 iph = ip_hdr(skb);
2973 fk->src = iph->saddr; 2992 fk->src = iph->saddr;
@@ -2976,7 +2995,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
2976 if (!ip_is_fragment(iph)) 2995 if (!ip_is_fragment(iph))
2977 proto = iph->protocol; 2996 proto = iph->protocol;
2978 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2997 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2979 if (!pskb_may_pull(skb, noff + sizeof(*iph6))) 2998 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
2980 return false; 2999 return false;
2981 iph6 = ipv6_hdr(skb); 3000 iph6 = ipv6_hdr(skb);
2982 fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr); 3001 fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
@@ -3087,8 +3106,7 @@ static int bond_open(struct net_device *bond_dev)
3087 3106
3088 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ 3107 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3089 queue_delayed_work(bond->wq, &bond->arp_work, 0); 3108 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3090 if (bond->params.arp_validate) 3109 bond->recv_probe = bond_arp_rcv;
3091 bond->recv_probe = bond_arp_rcv;
3092 } 3110 }
3093 3111
3094 if (bond->params.mode == BOND_MODE_8023AD) { 3112 if (bond->params.mode == BOND_MODE_8023AD) {
@@ -3375,8 +3393,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3375 struct list_head *iter; 3393 struct list_head *iter;
3376 int res = 0; 3394 int res = 0;
3377 3395
3378 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond, 3396 pr_debug("bond=%p, name=%s, new_mtu=%d\n",
3379 (bond_dev ? bond_dev->name : "None"), new_mtu); 3397 bond, bond_dev ? bond_dev->name : "None", new_mtu);
3380 3398
3381 /* Can't hold bond->lock with bh disabled here since 3399 /* Can't hold bond->lock with bh disabled here since
3382 * some base drivers panic. On the other hand we can't 3400 * some base drivers panic. On the other hand we can't
@@ -3395,8 +3413,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3395 3413
3396 bond_for_each_slave(bond, slave, iter) { 3414 bond_for_each_slave(bond, slave, iter) {
3397 pr_debug("s %p c_m %p\n", 3415 pr_debug("s %p c_m %p\n",
3398 slave, 3416 slave, slave->dev->netdev_ops->ndo_change_mtu);
3399 slave->dev->netdev_ops->ndo_change_mtu);
3400 3417
3401 res = dev_set_mtu(slave->dev, new_mtu); 3418 res = dev_set_mtu(slave->dev, new_mtu);
3402 3419
@@ -3484,15 +3501,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3484 */ 3501 */
3485 3502
3486 bond_for_each_slave(bond, slave, iter) { 3503 bond_for_each_slave(bond, slave, iter) {
3487 const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
3488 pr_debug("slave %p %s\n", slave, slave->dev->name); 3504 pr_debug("slave %p %s\n", slave, slave->dev->name);
3489
3490 if (slave_ops->ndo_set_mac_address == NULL) {
3491 res = -EOPNOTSUPP;
3492 pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
3493 goto unwind;
3494 }
3495
3496 res = dev_set_mac_address(slave->dev, addr); 3505 res = dev_set_mac_address(slave->dev, addr);
3497 if (res) { 3506 if (res) {
3498 /* TODO: consider downing the slave 3507 /* TODO: consider downing the slave
@@ -3568,7 +3577,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
3568 } 3577 }
3569 } 3578 }
3570 /* no slave that can tx has been found */ 3579 /* no slave that can tx has been found */
3571 kfree_skb(skb); 3580 dev_kfree_skb_any(skb);
3572} 3581}
3573 3582
3574/** 3583/**
@@ -3644,7 +3653,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
3644 if (slave) 3653 if (slave)
3645 bond_dev_queue_xmit(bond, skb, slave->dev); 3654 bond_dev_queue_xmit(bond, skb, slave->dev);
3646 else 3655 else
3647 kfree_skb(skb); 3656 dev_kfree_skb_any(skb);
3648 3657
3649 return NETDEV_TX_OK; 3658 return NETDEV_TX_OK;
3650} 3659}
@@ -3676,8 +3685,8 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3676 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 3685 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3677 3686
3678 if (!skb2) { 3687 if (!skb2) {
3679 pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n", 3688 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
3680 bond_dev->name); 3689 bond_dev->name, __func__);
3681 continue; 3690 continue;
3682 } 3691 }
3683 /* bond_dev_queue_xmit always returns 0 */ 3692 /* bond_dev_queue_xmit always returns 0 */
@@ -3687,7 +3696,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3687 if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP) 3696 if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
3688 bond_dev_queue_xmit(bond, skb, slave->dev); 3697 bond_dev_queue_xmit(bond, skb, slave->dev);
3689 else 3698 else
3690 kfree_skb(skb); 3699 dev_kfree_skb_any(skb);
3691 3700
3692 return NETDEV_TX_OK; 3701 return NETDEV_TX_OK;
3693} 3702}
@@ -3774,7 +3783,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
3774 pr_err("%s: Error: Unknown bonding mode %d\n", 3783 pr_err("%s: Error: Unknown bonding mode %d\n",
3775 dev->name, bond->params.mode); 3784 dev->name, bond->params.mode);
3776 WARN_ON_ONCE(1); 3785 WARN_ON_ONCE(1);
3777 kfree_skb(skb); 3786 dev_kfree_skb_any(skb);
3778 return NETDEV_TX_OK; 3787 return NETDEV_TX_OK;
3779 } 3788 }
3780} 3789}
@@ -3788,14 +3797,14 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3788 * If we risk deadlock from transmitting this in the 3797 * If we risk deadlock from transmitting this in the
3789 * netpoll path, tell netpoll to queue the frame for later tx 3798 * netpoll path, tell netpoll to queue the frame for later tx
3790 */ 3799 */
3791 if (is_netpoll_tx_blocked(dev)) 3800 if (unlikely(is_netpoll_tx_blocked(dev)))
3792 return NETDEV_TX_BUSY; 3801 return NETDEV_TX_BUSY;
3793 3802
3794 rcu_read_lock(); 3803 rcu_read_lock();
3795 if (bond_has_slaves(bond)) 3804 if (bond_has_slaves(bond))
3796 ret = __bond_start_xmit(skb, dev); 3805 ret = __bond_start_xmit(skb, dev);
3797 else 3806 else
3798 kfree_skb(skb); 3807 dev_kfree_skb_any(skb);
3799 rcu_read_unlock(); 3808 rcu_read_unlock();
3800 3809
3801 return ret; 3810 return ret;
@@ -3958,7 +3967,7 @@ static void bond_uninit(struct net_device *bond_dev)
3958 /* Release the bonded slaves */ 3967 /* Release the bonded slaves */
3959 bond_for_each_slave(bond, slave, iter) 3968 bond_for_each_slave(bond, slave, iter)
3960 __bond_release_one(bond_dev, slave->dev, true); 3969 __bond_release_one(bond_dev, slave->dev, true);
3961 pr_info("%s: released all slaves\n", bond_dev->name); 3970 pr_info("%s: Released all slaves\n", bond_dev->name);
3962 3971
3963 list_del(&bond->bond_list); 3972 list_del(&bond->bond_list);
3964 3973
@@ -3967,56 +3976,11 @@ static void bond_uninit(struct net_device *bond_dev)
3967 3976
3968/*------------------------- Module initialization ---------------------------*/ 3977/*------------------------- Module initialization ---------------------------*/
3969 3978
3970int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
3971{
3972 int i;
3973
3974 for (i = 0; tbl[i].modename; i++)
3975 if (mode == tbl[i].mode)
3976 return tbl[i].mode;
3977
3978 return -1;
3979}
3980
3981static int bond_parm_tbl_lookup_name(const char *modename,
3982 const struct bond_parm_tbl *tbl)
3983{
3984 int i;
3985
3986 for (i = 0; tbl[i].modename; i++)
3987 if (strcmp(modename, tbl[i].modename) == 0)
3988 return tbl[i].mode;
3989
3990 return -1;
3991}
3992
3993/*
3994 * Convert string input module parms. Accept either the
3995 * number of the mode or its string name. A bit complicated because
3996 * some mode names are substrings of other names, and calls from sysfs
3997 * may have whitespace in the name (trailing newlines, for example).
3998 */
3999int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4000{
4001 int modeint;
4002 char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
4003
4004 for (p = (char *)buf; *p; p++)
4005 if (!(isdigit(*p) || isspace(*p)))
4006 break;
4007
4008 if (*p && sscanf(buf, "%20s", modestr) != 0)
4009 return bond_parm_tbl_lookup_name(modestr, tbl);
4010 else if (sscanf(buf, "%d", &modeint) != 0)
4011 return bond_parm_tbl_lookup(modeint, tbl);
4012
4013 return -1;
4014}
4015
4016static int bond_check_params(struct bond_params *params) 3979static int bond_check_params(struct bond_params *params)
4017{ 3980{
4018 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; 3981 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4019 struct bond_opt_value newval, *valptr; 3982 struct bond_opt_value newval;
3983 const struct bond_opt_value *valptr;
4020 int arp_all_targets_value; 3984 int arp_all_targets_value;
4021 3985
4022 /* 3986 /*
@@ -4036,7 +4000,7 @@ static int bond_check_params(struct bond_params *params)
4036 if ((bond_mode != BOND_MODE_XOR) && 4000 if ((bond_mode != BOND_MODE_XOR) &&
4037 (bond_mode != BOND_MODE_8023AD)) { 4001 (bond_mode != BOND_MODE_8023AD)) {
4038 pr_info("xmit_hash_policy param is irrelevant in mode %s\n", 4002 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
4039 bond_mode_name(bond_mode)); 4003 bond_mode_name(bond_mode));
4040 } else { 4004 } else {
4041 bond_opt_initstr(&newval, xmit_hash_policy); 4005 bond_opt_initstr(&newval, xmit_hash_policy);
4042 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH), 4006 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
@@ -4077,74 +4041,71 @@ static int bond_check_params(struct bond_params *params)
4077 } 4041 }
4078 params->ad_select = valptr->value; 4042 params->ad_select = valptr->value;
4079 if (bond_mode != BOND_MODE_8023AD) 4043 if (bond_mode != BOND_MODE_8023AD)
4080 pr_warning("ad_select param only affects 802.3ad mode\n"); 4044 pr_warn("ad_select param only affects 802.3ad mode\n");
4081 } else { 4045 } else {
4082 params->ad_select = BOND_AD_STABLE; 4046 params->ad_select = BOND_AD_STABLE;
4083 } 4047 }
4084 4048
4085 if (max_bonds < 0) { 4049 if (max_bonds < 0) {
4086 pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n", 4050 pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4087 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS); 4051 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4088 max_bonds = BOND_DEFAULT_MAX_BONDS; 4052 max_bonds = BOND_DEFAULT_MAX_BONDS;
4089 } 4053 }
4090 4054
4091 if (miimon < 0) { 4055 if (miimon < 0) {
4092 pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n", 4056 pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4093 miimon, INT_MAX); 4057 miimon, INT_MAX);
4094 miimon = 0; 4058 miimon = 0;
4095 } 4059 }
4096 4060
4097 if (updelay < 0) { 4061 if (updelay < 0) {
4098 pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", 4062 pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4099 updelay, INT_MAX); 4063 updelay, INT_MAX);
4100 updelay = 0; 4064 updelay = 0;
4101 } 4065 }
4102 4066
4103 if (downdelay < 0) { 4067 if (downdelay < 0) {
4104 pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n", 4068 pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4105 downdelay, INT_MAX); 4069 downdelay, INT_MAX);
4106 downdelay = 0; 4070 downdelay = 0;
4107 } 4071 }
4108 4072
4109 if ((use_carrier != 0) && (use_carrier != 1)) { 4073 if ((use_carrier != 0) && (use_carrier != 1)) {
4110 pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n", 4074 pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4111 use_carrier); 4075 use_carrier);
4112 use_carrier = 1; 4076 use_carrier = 1;
4113 } 4077 }
4114 4078
4115 if (num_peer_notif < 0 || num_peer_notif > 255) { 4079 if (num_peer_notif < 0 || num_peer_notif > 255) {
4116 pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", 4080 pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4117 num_peer_notif); 4081 num_peer_notif);
4118 num_peer_notif = 1; 4082 num_peer_notif = 1;
4119 } 4083 }
4120 4084
4121 /* reset values for 802.3ad/TLB/ALB */ 4085 /* reset values for 802.3ad/TLB/ALB */
4122 if (BOND_NO_USES_ARP(bond_mode)) { 4086 if (BOND_NO_USES_ARP(bond_mode)) {
4123 if (!miimon) { 4087 if (!miimon) {
4124 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 4088 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4125 pr_warning("Forcing miimon to 100msec\n"); 4089 pr_warn("Forcing miimon to 100msec\n");
4126 miimon = BOND_DEFAULT_MIIMON; 4090 miimon = BOND_DEFAULT_MIIMON;
4127 } 4091 }
4128 } 4092 }
4129 4093
4130 if (tx_queues < 1 || tx_queues > 255) { 4094 if (tx_queues < 1 || tx_queues > 255) {
4131 pr_warning("Warning: tx_queues (%d) should be between " 4095 pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
4132 "1 and 255, resetting to %d\n", 4096 tx_queues, BOND_DEFAULT_TX_QUEUES);
4133 tx_queues, BOND_DEFAULT_TX_QUEUES);
4134 tx_queues = BOND_DEFAULT_TX_QUEUES; 4097 tx_queues = BOND_DEFAULT_TX_QUEUES;
4135 } 4098 }
4136 4099
4137 if ((all_slaves_active != 0) && (all_slaves_active != 1)) { 4100 if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4138 pr_warning("Warning: all_slaves_active module parameter (%d), " 4101 pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
4139 "not of valid value (0/1), so it was set to " 4102 all_slaves_active);
4140 "0\n", all_slaves_active);
4141 all_slaves_active = 0; 4103 all_slaves_active = 0;
4142 } 4104 }
4143 4105
4144 if (resend_igmp < 0 || resend_igmp > 255) { 4106 if (resend_igmp < 0 || resend_igmp > 255) {
4145 pr_warning("Warning: resend_igmp (%d) should be between " 4107 pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
4146 "0 and 255, resetting to %d\n", 4108 resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4147 resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4148 resend_igmp = BOND_DEFAULT_RESEND_IGMP; 4109 resend_igmp = BOND_DEFAULT_RESEND_IGMP;
4149 } 4110 }
4150 4111
@@ -4165,37 +4126,36 @@ static int bond_check_params(struct bond_params *params)
4165 /* just warn the user the up/down delay will have 4126 /* just warn the user the up/down delay will have
4166 * no effect since miimon is zero... 4127 * no effect since miimon is zero...
4167 */ 4128 */
4168 pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n", 4129 pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
4169 updelay, downdelay); 4130 updelay, downdelay);
4170 } 4131 }
4171 } else { 4132 } else {
4172 /* don't allow arp monitoring */ 4133 /* don't allow arp monitoring */
4173 if (arp_interval) { 4134 if (arp_interval) {
4174 pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n", 4135 pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
4175 miimon, arp_interval); 4136 miimon, arp_interval);
4176 arp_interval = 0; 4137 arp_interval = 0;
4177 } 4138 }
4178 4139
4179 if ((updelay % miimon) != 0) { 4140 if ((updelay % miimon) != 0) {
4180 pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n", 4141 pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
4181 updelay, miimon, 4142 updelay, miimon, (updelay / miimon) * miimon);
4182 (updelay / miimon) * miimon);
4183 } 4143 }
4184 4144
4185 updelay /= miimon; 4145 updelay /= miimon;
4186 4146
4187 if ((downdelay % miimon) != 0) { 4147 if ((downdelay % miimon) != 0) {
4188 pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n", 4148 pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
4189 downdelay, miimon, 4149 downdelay, miimon,
4190 (downdelay / miimon) * miimon); 4150 (downdelay / miimon) * miimon);
4191 } 4151 }
4192 4152
4193 downdelay /= miimon; 4153 downdelay /= miimon;
4194 } 4154 }
4195 4155
4196 if (arp_interval < 0) { 4156 if (arp_interval < 0) {
4197 pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n", 4157 pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4198 arp_interval, INT_MAX); 4158 arp_interval, INT_MAX);
4199 arp_interval = 0; 4159 arp_interval = 0;
4200 } 4160 }
4201 4161
@@ -4206,30 +4166,26 @@ static int bond_check_params(struct bond_params *params)
4206 __be32 ip; 4166 __be32 ip;
4207 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || 4167 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4208 IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { 4168 IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
4209 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 4169 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4210 arp_ip_target[i]); 4170 arp_ip_target[i]);
4211 arp_interval = 0; 4171 arp_interval = 0;
4212 } else { 4172 } else {
4213 if (bond_get_targets_ip(arp_target, ip) == -1) 4173 if (bond_get_targets_ip(arp_target, ip) == -1)
4214 arp_target[arp_ip_count++] = ip; 4174 arp_target[arp_ip_count++] = ip;
4215 else 4175 else
4216 pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n", 4176 pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
4217 &ip); 4177 &ip);
4218 } 4178 }
4219 } 4179 }
4220 4180
4221 if (arp_interval && !arp_ip_count) { 4181 if (arp_interval && !arp_ip_count) {
4222 /* don't allow arping if no arp_ip_target given... */ 4182 /* don't allow arping if no arp_ip_target given... */
4223 pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n", 4183 pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
4224 arp_interval); 4184 arp_interval);
4225 arp_interval = 0; 4185 arp_interval = 0;
4226 } 4186 }
4227 4187
4228 if (arp_validate) { 4188 if (arp_validate) {
4229 if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
4230 pr_err("arp_validate only supported in active-backup mode\n");
4231 return -EINVAL;
4232 }
4233 if (!arp_interval) { 4189 if (!arp_interval) {
4234 pr_err("arp_validate requires arp_interval\n"); 4190 pr_err("arp_validate requires arp_interval\n");
4235 return -EINVAL; 4191 return -EINVAL;
@@ -4271,23 +4227,23 @@ static int bond_check_params(struct bond_params *params)
4271 arp_interval, valptr->string, arp_ip_count); 4227 arp_interval, valptr->string, arp_ip_count);
4272 4228
4273 for (i = 0; i < arp_ip_count; i++) 4229 for (i = 0; i < arp_ip_count; i++)
4274 pr_info(" %s", arp_ip_target[i]); 4230 pr_cont(" %s", arp_ip_target[i]);
4275 4231
4276 pr_info("\n"); 4232 pr_cont("\n");
4277 4233
4278 } else if (max_bonds) { 4234 } else if (max_bonds) {
4279 /* miimon and arp_interval not set, we need one so things 4235 /* miimon and arp_interval not set, we need one so things
4280 * work as expected, see bonding.txt for details 4236 * work as expected, see bonding.txt for details
4281 */ 4237 */
4282 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n"); 4238 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
4283 } 4239 }
4284 4240
4285 if (primary && !USES_PRIMARY(bond_mode)) { 4241 if (primary && !USES_PRIMARY(bond_mode)) {
4286 /* currently, using a primary only makes sense 4242 /* currently, using a primary only makes sense
4287 * in active backup, TLB or ALB modes 4243 * in active backup, TLB or ALB modes
4288 */ 4244 */
4289 pr_warning("Warning: %s primary device specified but has no effect in %s mode\n", 4245 pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
4290 primary, bond_mode_name(bond_mode)); 4246 primary, bond_mode_name(bond_mode));
4291 primary = NULL; 4247 primary = NULL;
4292 } 4248 }
4293 4249
@@ -4316,14 +4272,14 @@ static int bond_check_params(struct bond_params *params)
4316 } 4272 }
4317 fail_over_mac_value = valptr->value; 4273 fail_over_mac_value = valptr->value;
4318 if (bond_mode != BOND_MODE_ACTIVEBACKUP) 4274 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4319 pr_warning("Warning: fail_over_mac only affects active-backup mode.\n"); 4275 pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
4320 } else { 4276 } else {
4321 fail_over_mac_value = BOND_FOM_NONE; 4277 fail_over_mac_value = BOND_FOM_NONE;
4322 } 4278 }
4323 4279
4324 if (lp_interval == 0) { 4280 if (lp_interval == 0) {
4325 pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n", 4281 pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
4326 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL); 4282 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
4327 lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL; 4283 lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
4328 } 4284 }
4329 4285
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 70651f8e8e3b..f847e165d252 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -181,7 +181,7 @@ static int bond_changelink(struct net_device *bond_dev,
181 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]); 181 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
182 182
183 if (arp_interval && miimon) { 183 if (arp_interval && miimon) {
184 pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n", 184 pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
185 bond->dev->name); 185 bond->dev->name);
186 return -EINVAL; 186 return -EINVAL;
187 } 187 }
@@ -199,7 +199,7 @@ static int bond_changelink(struct net_device *bond_dev,
199 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) { 199 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
200 __be32 target = nla_get_be32(attr); 200 __be32 target = nla_get_be32(attr);
201 201
202 bond_opt_initval(&newval, target); 202 bond_opt_initval(&newval, (__force u64)target);
203 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS, 203 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
204 &newval); 204 &newval);
205 if (err) 205 if (err)
@@ -207,7 +207,7 @@ static int bond_changelink(struct net_device *bond_dev,
207 i++; 207 i++;
208 } 208 }
209 if (i == 0 && bond->params.arp_interval) 209 if (i == 0 && bond->params.arp_interval)
210 pr_warn("%s: removing last arp target with arp_interval on\n", 210 pr_warn("%s: Removing last arp target with arp_interval on\n",
211 bond->dev->name); 211 bond->dev->name);
212 if (err) 212 if (err)
213 return err; 213 return err;
@@ -216,7 +216,7 @@ static int bond_changelink(struct net_device *bond_dev,
216 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]); 216 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
217 217
218 if (arp_validate && miimon) { 218 if (arp_validate && miimon) {
219 pr_err("%s: ARP validating cannot be used with MII monitoring.\n", 219 pr_err("%s: ARP validating cannot be used with MII monitoring\n",
220 bond->dev->name); 220 bond->dev->name);
221 return -EINVAL; 221 return -EINVAL;
222 } 222 }
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 298c26509095..724e30fa20b9 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -20,7 +20,59 @@
20#include <linux/inet.h> 20#include <linux/inet.h>
21#include "bonding.h" 21#include "bonding.h"
22 22
23static struct bond_opt_value bond_mode_tbl[] = { 23static int bond_option_active_slave_set(struct bonding *bond,
24 const struct bond_opt_value *newval);
25static int bond_option_miimon_set(struct bonding *bond,
26 const struct bond_opt_value *newval);
27static int bond_option_updelay_set(struct bonding *bond,
28 const struct bond_opt_value *newval);
29static int bond_option_downdelay_set(struct bonding *bond,
30 const struct bond_opt_value *newval);
31static int bond_option_use_carrier_set(struct bonding *bond,
32 const struct bond_opt_value *newval);
33static int bond_option_arp_interval_set(struct bonding *bond,
34 const struct bond_opt_value *newval);
35static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
36static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
37static int bond_option_arp_ip_targets_set(struct bonding *bond,
38 const struct bond_opt_value *newval);
39static int bond_option_arp_validate_set(struct bonding *bond,
40 const struct bond_opt_value *newval);
41static int bond_option_arp_all_targets_set(struct bonding *bond,
42 const struct bond_opt_value *newval);
43static int bond_option_primary_set(struct bonding *bond,
44 const struct bond_opt_value *newval);
45static int bond_option_primary_reselect_set(struct bonding *bond,
46 const struct bond_opt_value *newval);
47static int bond_option_fail_over_mac_set(struct bonding *bond,
48 const struct bond_opt_value *newval);
49static int bond_option_xmit_hash_policy_set(struct bonding *bond,
50 const struct bond_opt_value *newval);
51static int bond_option_resend_igmp_set(struct bonding *bond,
52 const struct bond_opt_value *newval);
53static int bond_option_num_peer_notif_set(struct bonding *bond,
54 const struct bond_opt_value *newval);
55static int bond_option_all_slaves_active_set(struct bonding *bond,
56 const struct bond_opt_value *newval);
57static int bond_option_min_links_set(struct bonding *bond,
58 const struct bond_opt_value *newval);
59static int bond_option_lp_interval_set(struct bonding *bond,
60 const struct bond_opt_value *newval);
61static int bond_option_pps_set(struct bonding *bond,
62 const struct bond_opt_value *newval);
63static int bond_option_lacp_rate_set(struct bonding *bond,
64 const struct bond_opt_value *newval);
65static int bond_option_ad_select_set(struct bonding *bond,
66 const struct bond_opt_value *newval);
67static int bond_option_queue_id_set(struct bonding *bond,
68 const struct bond_opt_value *newval);
69static int bond_option_mode_set(struct bonding *bond,
70 const struct bond_opt_value *newval);
71static int bond_option_slaves_set(struct bonding *bond,
72 const struct bond_opt_value *newval);
73
74
75static const struct bond_opt_value bond_mode_tbl[] = {
24 { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT}, 76 { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
25 { "active-backup", BOND_MODE_ACTIVEBACKUP, 0}, 77 { "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
26 { "balance-xor", BOND_MODE_XOR, 0}, 78 { "balance-xor", BOND_MODE_XOR, 0},
@@ -31,13 +83,13 @@ static struct bond_opt_value bond_mode_tbl[] = {
31 { NULL, -1, 0}, 83 { NULL, -1, 0},
32}; 84};
33 85
34static struct bond_opt_value bond_pps_tbl[] = { 86static const struct bond_opt_value bond_pps_tbl[] = {
35 { "default", 1, BOND_VALFLAG_DEFAULT}, 87 { "default", 1, BOND_VALFLAG_DEFAULT},
36 { "maxval", USHRT_MAX, BOND_VALFLAG_MAX}, 88 { "maxval", USHRT_MAX, BOND_VALFLAG_MAX},
37 { NULL, -1, 0}, 89 { NULL, -1, 0},
38}; 90};
39 91
40static struct bond_opt_value bond_xmit_hashtype_tbl[] = { 92static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
41 { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT}, 93 { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
42 { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0}, 94 { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
43 { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0}, 95 { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
@@ -46,85 +98,88 @@ static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
46 { NULL, -1, 0}, 98 { NULL, -1, 0},
47}; 99};
48 100
49static struct bond_opt_value bond_arp_validate_tbl[] = { 101static const struct bond_opt_value bond_arp_validate_tbl[] = {
50 { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT}, 102 { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
51 { "active", BOND_ARP_VALIDATE_ACTIVE, 0}, 103 { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
52 { "backup", BOND_ARP_VALIDATE_BACKUP, 0}, 104 { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
53 { "all", BOND_ARP_VALIDATE_ALL, 0}, 105 { "all", BOND_ARP_VALIDATE_ALL, 0},
54 { NULL, -1, 0}, 106 { "filter", BOND_ARP_FILTER, 0},
107 { "filter_active", BOND_ARP_FILTER_ACTIVE, 0},
108 { "filter_backup", BOND_ARP_FILTER_BACKUP, 0},
109 { NULL, -1, 0},
55}; 110};
56 111
57static struct bond_opt_value bond_arp_all_targets_tbl[] = { 112static const struct bond_opt_value bond_arp_all_targets_tbl[] = {
58 { "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT}, 113 { "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
59 { "all", BOND_ARP_TARGETS_ALL, 0}, 114 { "all", BOND_ARP_TARGETS_ALL, 0},
60 { NULL, -1, 0}, 115 { NULL, -1, 0},
61}; 116};
62 117
63static struct bond_opt_value bond_fail_over_mac_tbl[] = { 118static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
64 { "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT}, 119 { "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT},
65 { "active", BOND_FOM_ACTIVE, 0}, 120 { "active", BOND_FOM_ACTIVE, 0},
66 { "follow", BOND_FOM_FOLLOW, 0}, 121 { "follow", BOND_FOM_FOLLOW, 0},
67 { NULL, -1, 0}, 122 { NULL, -1, 0},
68}; 123};
69 124
70static struct bond_opt_value bond_intmax_tbl[] = { 125static const struct bond_opt_value bond_intmax_tbl[] = {
71 { "off", 0, BOND_VALFLAG_DEFAULT}, 126 { "off", 0, BOND_VALFLAG_DEFAULT},
72 { "maxval", INT_MAX, BOND_VALFLAG_MAX}, 127 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
73}; 128};
74 129
75static struct bond_opt_value bond_lacp_rate_tbl[] = { 130static const struct bond_opt_value bond_lacp_rate_tbl[] = {
76 { "slow", AD_LACP_SLOW, 0}, 131 { "slow", AD_LACP_SLOW, 0},
77 { "fast", AD_LACP_FAST, 0}, 132 { "fast", AD_LACP_FAST, 0},
78 { NULL, -1, 0}, 133 { NULL, -1, 0},
79}; 134};
80 135
81static struct bond_opt_value bond_ad_select_tbl[] = { 136static const struct bond_opt_value bond_ad_select_tbl[] = {
82 { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT}, 137 { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
83 { "bandwidth", BOND_AD_BANDWIDTH, 0}, 138 { "bandwidth", BOND_AD_BANDWIDTH, 0},
84 { "count", BOND_AD_COUNT, 0}, 139 { "count", BOND_AD_COUNT, 0},
85 { NULL, -1, 0}, 140 { NULL, -1, 0},
86}; 141};
87 142
88static struct bond_opt_value bond_num_peer_notif_tbl[] = { 143static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
89 { "off", 0, 0}, 144 { "off", 0, 0},
90 { "maxval", 255, BOND_VALFLAG_MAX}, 145 { "maxval", 255, BOND_VALFLAG_MAX},
91 { "default", 1, BOND_VALFLAG_DEFAULT}, 146 { "default", 1, BOND_VALFLAG_DEFAULT},
92 { NULL, -1, 0} 147 { NULL, -1, 0}
93}; 148};
94 149
95static struct bond_opt_value bond_primary_reselect_tbl[] = { 150static const struct bond_opt_value bond_primary_reselect_tbl[] = {
96 { "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT}, 151 { "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
97 { "better", BOND_PRI_RESELECT_BETTER, 0}, 152 { "better", BOND_PRI_RESELECT_BETTER, 0},
98 { "failure", BOND_PRI_RESELECT_FAILURE, 0}, 153 { "failure", BOND_PRI_RESELECT_FAILURE, 0},
99 { NULL, -1}, 154 { NULL, -1},
100}; 155};
101 156
102static struct bond_opt_value bond_use_carrier_tbl[] = { 157static const struct bond_opt_value bond_use_carrier_tbl[] = {
103 { "off", 0, 0}, 158 { "off", 0, 0},
104 { "on", 1, BOND_VALFLAG_DEFAULT}, 159 { "on", 1, BOND_VALFLAG_DEFAULT},
105 { NULL, -1, 0} 160 { NULL, -1, 0}
106}; 161};
107 162
108static struct bond_opt_value bond_all_slaves_active_tbl[] = { 163static const struct bond_opt_value bond_all_slaves_active_tbl[] = {
109 { "off", 0, BOND_VALFLAG_DEFAULT}, 164 { "off", 0, BOND_VALFLAG_DEFAULT},
110 { "on", 1, 0}, 165 { "on", 1, 0},
111 { NULL, -1, 0} 166 { NULL, -1, 0}
112}; 167};
113 168
114static struct bond_opt_value bond_resend_igmp_tbl[] = { 169static const struct bond_opt_value bond_resend_igmp_tbl[] = {
115 { "off", 0, 0}, 170 { "off", 0, 0},
116 { "maxval", 255, BOND_VALFLAG_MAX}, 171 { "maxval", 255, BOND_VALFLAG_MAX},
117 { "default", 1, BOND_VALFLAG_DEFAULT}, 172 { "default", 1, BOND_VALFLAG_DEFAULT},
118 { NULL, -1, 0} 173 { NULL, -1, 0}
119}; 174};
120 175
121static struct bond_opt_value bond_lp_interval_tbl[] = { 176static const struct bond_opt_value bond_lp_interval_tbl[] = {
122 { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, 177 { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
123 { "maxval", INT_MAX, BOND_VALFLAG_MAX}, 178 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
124 { NULL, -1, 0}, 179 { NULL, -1, 0},
125}; 180};
126 181
127static struct bond_option bond_opts[] = { 182static const struct bond_option bond_opts[] = {
128 [BOND_OPT_MODE] = { 183 [BOND_OPT_MODE] = {
129 .id = BOND_OPT_MODE, 184 .id = BOND_OPT_MODE,
130 .name = "mode", 185 .name = "mode",
@@ -152,7 +207,8 @@ static struct bond_option bond_opts[] = {
152 .id = BOND_OPT_ARP_VALIDATE, 207 .id = BOND_OPT_ARP_VALIDATE,
153 .name = "arp_validate", 208 .name = "arp_validate",
154 .desc = "validate src/dst of ARP probes", 209 .desc = "validate src/dst of ARP probes",
155 .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)), 210 .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
211 BIT(BOND_MODE_ALB),
156 .values = bond_arp_validate_tbl, 212 .values = bond_arp_validate_tbl,
157 .set = bond_option_arp_validate_set 213 .set = bond_option_arp_validate_set
158 }, 214 },
@@ -312,9 +368,9 @@ static struct bond_option bond_opts[] = {
312}; 368};
313 369
314/* Searches for a value in opt's values[] table */ 370/* Searches for a value in opt's values[] table */
315struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val) 371const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
316{ 372{
317 struct bond_option *opt; 373 const struct bond_option *opt;
318 int i; 374 int i;
319 375
320 opt = bond_opt_get(option); 376 opt = bond_opt_get(option);
@@ -328,7 +384,7 @@ struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
328} 384}
329 385
330/* Searches for a value in opt's values[] table which matches the flagmask */ 386/* Searches for a value in opt's values[] table which matches the flagmask */
331static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt, 387static const struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
332 u32 flagmask) 388 u32 flagmask)
333{ 389{
334 int i; 390 int i;
@@ -345,7 +401,7 @@ static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
345 */ 401 */
346static bool bond_opt_check_range(const struct bond_option *opt, u64 val) 402static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
347{ 403{
348 struct bond_opt_value *minval, *maxval; 404 const struct bond_opt_value *minval, *maxval;
349 405
350 minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN); 406 minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
351 maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX); 407 maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
@@ -365,11 +421,12 @@ static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
365 * or the struct_opt_value that matched. It also strips the new line from 421 * or the struct_opt_value that matched. It also strips the new line from
366 * @val->string if it's present. 422 * @val->string if it's present.
367 */ 423 */
368struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, 424const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
369 struct bond_opt_value *val) 425 struct bond_opt_value *val)
370{ 426{
371 char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, }; 427 char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
372 struct bond_opt_value *tbl, *ret = NULL; 428 const struct bond_opt_value *tbl;
429 const struct bond_opt_value *ret = NULL;
373 bool checkval; 430 bool checkval;
374 int i, rv; 431 int i, rv;
375 432
@@ -448,7 +505,7 @@ static int bond_opt_check_deps(struct bonding *bond,
448static void bond_opt_dep_print(struct bonding *bond, 505static void bond_opt_dep_print(struct bonding *bond,
449 const struct bond_option *opt) 506 const struct bond_option *opt)
450{ 507{
451 struct bond_opt_value *modeval; 508 const struct bond_opt_value *modeval;
452 struct bond_params *params; 509 struct bond_params *params;
453 510
454 params = &bond->params; 511 params = &bond->params;
@@ -461,9 +518,9 @@ static void bond_opt_dep_print(struct bonding *bond,
461 518
462static void bond_opt_error_interpret(struct bonding *bond, 519static void bond_opt_error_interpret(struct bonding *bond,
463 const struct bond_option *opt, 520 const struct bond_option *opt,
464 int error, struct bond_opt_value *val) 521 int error, const struct bond_opt_value *val)
465{ 522{
466 struct bond_opt_value *minval, *maxval; 523 const struct bond_opt_value *minval, *maxval;
467 char *p; 524 char *p;
468 525
469 switch (error) { 526 switch (error) {
@@ -474,10 +531,10 @@ static void bond_opt_error_interpret(struct bonding *bond,
474 p = strchr(val->string, '\n'); 531 p = strchr(val->string, '\n');
475 if (p) 532 if (p)
476 *p = '\0'; 533 *p = '\0';
477 pr_err("%s: option %s: invalid value (%s).\n", 534 pr_err("%s: option %s: invalid value (%s)\n",
478 bond->dev->name, opt->name, val->string); 535 bond->dev->name, opt->name, val->string);
479 } else { 536 } else {
480 pr_err("%s: option %s: invalid value (%llu).\n", 537 pr_err("%s: option %s: invalid value (%llu)\n",
481 bond->dev->name, opt->name, val->value); 538 bond->dev->name, opt->name, val->value);
482 } 539 }
483 } 540 }
@@ -485,7 +542,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
485 maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX); 542 maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
486 if (!maxval) 543 if (!maxval)
487 break; 544 break;
488 pr_err("%s: option %s: allowed values %llu - %llu.\n", 545 pr_err("%s: option %s: allowed values %llu - %llu\n",
489 bond->dev->name, opt->name, minval ? minval->value : 0, 546 bond->dev->name, opt->name, minval ? minval->value : 0,
490 maxval->value); 547 maxval->value);
491 break; 548 break;
@@ -493,11 +550,11 @@ static void bond_opt_error_interpret(struct bonding *bond,
493 bond_opt_dep_print(bond, opt); 550 bond_opt_dep_print(bond, opt);
494 break; 551 break;
495 case -ENOTEMPTY: 552 case -ENOTEMPTY:
496 pr_err("%s: option %s: unable to set because the bond device has slaves.\n", 553 pr_err("%s: option %s: unable to set because the bond device has slaves\n",
497 bond->dev->name, opt->name); 554 bond->dev->name, opt->name);
498 break; 555 break;
499 case -EBUSY: 556 case -EBUSY:
500 pr_err("%s: option %s: unable to set because the bond device is up.\n", 557 pr_err("%s: option %s: unable to set because the bond device is up\n",
501 bond->dev->name, opt->name); 558 bond->dev->name, opt->name);
502 break; 559 break;
503 default: 560 default:
@@ -518,7 +575,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
518int __bond_opt_set(struct bonding *bond, 575int __bond_opt_set(struct bonding *bond,
519 unsigned int option, struct bond_opt_value *val) 576 unsigned int option, struct bond_opt_value *val)
520{ 577{
521 struct bond_opt_value *retval = NULL; 578 const struct bond_opt_value *retval = NULL;
522 const struct bond_option *opt; 579 const struct bond_option *opt;
523 int ret = -ENOENT; 580 int ret = -ENOENT;
524 581
@@ -573,7 +630,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
573 * This function checks if option is valid and if so returns a pointer 630 * This function checks if option is valid and if so returns a pointer
574 * to its entry in the bond_opts[] option array. 631 * to its entry in the bond_opts[] option array.
575 */ 632 */
576struct bond_option *bond_opt_get(unsigned int option) 633const struct bond_option *bond_opt_get(unsigned int option)
577{ 634{
578 if (!BOND_OPT_VALID(option)) 635 if (!BOND_OPT_VALID(option))
579 return NULL; 636 return NULL;
@@ -581,7 +638,7 @@ struct bond_option *bond_opt_get(unsigned int option)
581 return &bond_opts[option]; 638 return &bond_opts[option];
582} 639}
583 640
584int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval) 641int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
585{ 642{
586 if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) { 643 if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
587 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n", 644 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -590,7 +647,7 @@ int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
590 bond->params.arp_interval = 0; 647 bond->params.arp_interval = 0;
591 /* set miimon to default value */ 648 /* set miimon to default value */
592 bond->params.miimon = BOND_DEFAULT_MIIMON; 649 bond->params.miimon = BOND_DEFAULT_MIIMON;
593 pr_info("%s: Setting MII monitoring interval to %d.\n", 650 pr_info("%s: Setting MII monitoring interval to %d\n",
594 bond->dev->name, bond->params.miimon); 651 bond->dev->name, bond->params.miimon);
595 } 652 }
596 653
@@ -619,8 +676,8 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
619 return __bond_option_active_slave_get(bond, bond->curr_active_slave); 676 return __bond_option_active_slave_get(bond, bond->curr_active_slave);
620} 677}
621 678
622int bond_option_active_slave_set(struct bonding *bond, 679static int bond_option_active_slave_set(struct bonding *bond,
623 struct bond_opt_value *newval) 680 const struct bond_opt_value *newval)
624{ 681{
625 char ifname[IFNAMSIZ] = { 0, }; 682 char ifname[IFNAMSIZ] = { 0, };
626 struct net_device *slave_dev; 683 struct net_device *slave_dev;
@@ -637,13 +694,13 @@ int bond_option_active_slave_set(struct bonding *bond,
637 694
638 if (slave_dev) { 695 if (slave_dev) {
639 if (!netif_is_bond_slave(slave_dev)) { 696 if (!netif_is_bond_slave(slave_dev)) {
640 pr_err("Device %s is not bonding slave.\n", 697 pr_err("Device %s is not bonding slave\n",
641 slave_dev->name); 698 slave_dev->name);
642 return -EINVAL; 699 return -EINVAL;
643 } 700 }
644 701
645 if (bond->dev != netdev_master_upper_dev_get(slave_dev)) { 702 if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
646 pr_err("%s: Device %s is not our slave.\n", 703 pr_err("%s: Device %s is not our slave\n",
647 bond->dev->name, slave_dev->name); 704 bond->dev->name, slave_dev->name);
648 return -EINVAL; 705 return -EINVAL;
649 } 706 }
@@ -654,9 +711,8 @@ int bond_option_active_slave_set(struct bonding *bond,
654 711
655 /* check to see if we are clearing active */ 712 /* check to see if we are clearing active */
656 if (!slave_dev) { 713 if (!slave_dev) {
657 pr_info("%s: Clearing current active slave.\n", 714 pr_info("%s: Clearing current active slave\n", bond->dev->name);
658 bond->dev->name); 715 RCU_INIT_POINTER(bond->curr_active_slave, NULL);
659 rcu_assign_pointer(bond->curr_active_slave, NULL);
660 bond_select_active_slave(bond); 716 bond_select_active_slave(bond);
661 } else { 717 } else {
662 struct slave *old_active = bond->curr_active_slave; 718 struct slave *old_active = bond->curr_active_slave;
@@ -666,16 +722,16 @@ int bond_option_active_slave_set(struct bonding *bond,
666 722
667 if (new_active == old_active) { 723 if (new_active == old_active) {
668 /* do nothing */ 724 /* do nothing */
669 pr_info("%s: %s is already the current active slave.\n", 725 pr_info("%s: %s is already the current active slave\n",
670 bond->dev->name, new_active->dev->name); 726 bond->dev->name, new_active->dev->name);
671 } else { 727 } else {
672 if (old_active && (new_active->link == BOND_LINK_UP) && 728 if (old_active && (new_active->link == BOND_LINK_UP) &&
673 IS_UP(new_active->dev)) { 729 IS_UP(new_active->dev)) {
674 pr_info("%s: Setting %s as active slave.\n", 730 pr_info("%s: Setting %s as active slave\n",
675 bond->dev->name, new_active->dev->name); 731 bond->dev->name, new_active->dev->name);
676 bond_change_active_slave(bond, new_active); 732 bond_change_active_slave(bond, new_active);
677 } else { 733 } else {
678 pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n", 734 pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
679 bond->dev->name, new_active->dev->name, 735 bond->dev->name, new_active->dev->name,
680 new_active->dev->name); 736 new_active->dev->name);
681 ret = -EINVAL; 737 ret = -EINVAL;
@@ -689,21 +745,22 @@ int bond_option_active_slave_set(struct bonding *bond,
689 return ret; 745 return ret;
690} 746}
691 747
692int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval) 748static int bond_option_miimon_set(struct bonding *bond,
749 const struct bond_opt_value *newval)
693{ 750{
694 pr_info("%s: Setting MII monitoring interval to %llu.\n", 751 pr_info("%s: Setting MII monitoring interval to %llu\n",
695 bond->dev->name, newval->value); 752 bond->dev->name, newval->value);
696 bond->params.miimon = newval->value; 753 bond->params.miimon = newval->value;
697 if (bond->params.updelay) 754 if (bond->params.updelay)
698 pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", 755 pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
699 bond->dev->name, 756 bond->dev->name,
700 bond->params.updelay * bond->params.miimon); 757 bond->params.updelay * bond->params.miimon);
701 if (bond->params.downdelay) 758 if (bond->params.downdelay)
702 pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", 759 pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
703 bond->dev->name, 760 bond->dev->name,
704 bond->params.downdelay * bond->params.miimon); 761 bond->params.downdelay * bond->params.miimon);
705 if (newval->value && bond->params.arp_interval) { 762 if (newval->value && bond->params.arp_interval) {
706 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", 763 pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
707 bond->dev->name); 764 bond->dev->name);
708 bond->params.arp_interval = 0; 765 bond->params.arp_interval = 0;
709 if (bond->params.arp_validate) 766 if (bond->params.arp_validate)
@@ -726,7 +783,8 @@ int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
726 return 0; 783 return 0;
727} 784}
728 785
729int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval) 786static int bond_option_updelay_set(struct bonding *bond,
787 const struct bond_opt_value *newval)
730{ 788{
731 int value = newval->value; 789 int value = newval->value;
732 790
@@ -743,15 +801,14 @@ int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
743 bond->params.miimon); 801 bond->params.miimon);
744 } 802 }
745 bond->params.updelay = value / bond->params.miimon; 803 bond->params.updelay = value / bond->params.miimon;
746 pr_info("%s: Setting up delay to %d.\n", 804 pr_info("%s: Setting up delay to %d\n",
747 bond->dev->name, 805 bond->dev->name, bond->params.updelay * bond->params.miimon);
748 bond->params.updelay * bond->params.miimon);
749 806
750 return 0; 807 return 0;
751} 808}
752 809
753int bond_option_downdelay_set(struct bonding *bond, 810static int bond_option_downdelay_set(struct bonding *bond,
754 struct bond_opt_value *newval) 811 const struct bond_opt_value *newval)
755{ 812{
756 int value = newval->value; 813 int value = newval->value;
757 814
@@ -768,37 +825,36 @@ int bond_option_downdelay_set(struct bonding *bond,
768 bond->params.miimon); 825 bond->params.miimon);
769 } 826 }
770 bond->params.downdelay = value / bond->params.miimon; 827 bond->params.downdelay = value / bond->params.miimon;
771 pr_info("%s: Setting down delay to %d.\n", 828 pr_info("%s: Setting down delay to %d\n",
772 bond->dev->name, 829 bond->dev->name, bond->params.downdelay * bond->params.miimon);
773 bond->params.downdelay * bond->params.miimon);
774 830
775 return 0; 831 return 0;
776} 832}
777 833
778int bond_option_use_carrier_set(struct bonding *bond, 834static int bond_option_use_carrier_set(struct bonding *bond,
779 struct bond_opt_value *newval) 835 const struct bond_opt_value *newval)
780{ 836{
781 pr_info("%s: Setting use_carrier to %llu.\n", 837 pr_info("%s: Setting use_carrier to %llu\n",
782 bond->dev->name, newval->value); 838 bond->dev->name, newval->value);
783 bond->params.use_carrier = newval->value; 839 bond->params.use_carrier = newval->value;
784 840
785 return 0; 841 return 0;
786} 842}
787 843
788int bond_option_arp_interval_set(struct bonding *bond, 844static int bond_option_arp_interval_set(struct bonding *bond,
789 struct bond_opt_value *newval) 845 const struct bond_opt_value *newval)
790{ 846{
791 pr_info("%s: Setting ARP monitoring interval to %llu.\n", 847 pr_info("%s: Setting ARP monitoring interval to %llu\n",
792 bond->dev->name, newval->value); 848 bond->dev->name, newval->value);
793 bond->params.arp_interval = newval->value; 849 bond->params.arp_interval = newval->value;
794 if (newval->value) { 850 if (newval->value) {
795 if (bond->params.miimon) { 851 if (bond->params.miimon) {
796 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", 852 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
797 bond->dev->name, bond->dev->name); 853 bond->dev->name, bond->dev->name);
798 bond->params.miimon = 0; 854 bond->params.miimon = 0;
799 } 855 }
800 if (!bond->params.arp_targets[0]) 856 if (!bond->params.arp_targets[0])
801 pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", 857 pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
802 bond->dev->name); 858 bond->dev->name);
803 } 859 }
804 if (bond->dev->flags & IFF_UP) { 860 if (bond->dev->flags & IFF_UP) {
@@ -813,8 +869,7 @@ int bond_option_arp_interval_set(struct bonding *bond,
813 cancel_delayed_work_sync(&bond->arp_work); 869 cancel_delayed_work_sync(&bond->arp_work);
814 } else { 870 } else {
815 /* arp_validate can be set only in active-backup mode */ 871 /* arp_validate can be set only in active-backup mode */
816 if (bond->params.arp_validate) 872 bond->recv_probe = bond_arp_rcv;
817 bond->recv_probe = bond_arp_rcv;
818 cancel_delayed_work_sync(&bond->mii_work); 873 cancel_delayed_work_sync(&bond->mii_work);
819 queue_delayed_work(bond->wq, &bond->arp_work, 0); 874 queue_delayed_work(bond->wq, &bond->arp_work, 0);
820 } 875 }
@@ -857,19 +912,18 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
857 912
858 ind = bond_get_targets_ip(targets, 0); /* first free slot */ 913 ind = bond_get_targets_ip(targets, 0); /* first free slot */
859 if (ind == -1) { 914 if (ind == -1) {
860 pr_err("%s: ARP target table is full!\n", 915 pr_err("%s: ARP target table is full!\n", bond->dev->name);
861 bond->dev->name);
862 return -EINVAL; 916 return -EINVAL;
863 } 917 }
864 918
865 pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target); 919 pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
866 920
867 _bond_options_arp_ip_target_set(bond, ind, target, jiffies); 921 _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
868 922
869 return 0; 923 return 0;
870} 924}
871 925
872int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) 926static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
873{ 927{
874 int ret; 928 int ret;
875 929
@@ -881,7 +935,7 @@ int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
881 return ret; 935 return ret;
882} 936}
883 937
884int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) 938static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
885{ 939{
886 __be32 *targets = bond->params.arp_targets; 940 __be32 *targets = bond->params.arp_targets;
887 struct list_head *iter; 941 struct list_head *iter;
@@ -897,17 +951,16 @@ int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
897 951
898 ind = bond_get_targets_ip(targets, target); 952 ind = bond_get_targets_ip(targets, target);
899 if (ind == -1) { 953 if (ind == -1) {
900 pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", 954 pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
901 bond->dev->name, &target); 955 bond->dev->name, &target);
902 return -EINVAL; 956 return -EINVAL;
903 } 957 }
904 958
905 if (ind == 0 && !targets[1] && bond->params.arp_interval) 959 if (ind == 0 && !targets[1] && bond->params.arp_interval)
906 pr_warn("%s: removing last arp target with arp_interval on\n", 960 pr_warn("%s: Removing last arp target with arp_interval on\n",
907 bond->dev->name); 961 bond->dev->name);
908 962
909 pr_info("%s: removing ARP target %pI4.\n", bond->dev->name, 963 pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
910 &target);
911 964
912 /* not to race with bond_arp_rcv */ 965 /* not to race with bond_arp_rcv */
913 write_lock_bh(&bond->lock); 966 write_lock_bh(&bond->lock);
@@ -938,8 +991,8 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond)
938 write_unlock_bh(&bond->lock); 991 write_unlock_bh(&bond->lock);
939} 992}
940 993
941int bond_option_arp_ip_targets_set(struct bonding *bond, 994static int bond_option_arp_ip_targets_set(struct bonding *bond,
942 struct bond_opt_value *newval) 995 const struct bond_opt_value *newval)
943{ 996{
944 int ret = -EPERM; 997 int ret = -EPERM;
945 __be32 target; 998 __be32 target;
@@ -955,7 +1008,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
955 else if (newval->string[0] == '-') 1008 else if (newval->string[0] == '-')
956 ret = bond_option_arp_ip_target_rem(bond, target); 1009 ret = bond_option_arp_ip_target_rem(bond, target);
957 else 1010 else
958 pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n", 1011 pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
959 bond->dev->name); 1012 bond->dev->name);
960 } else { 1013 } else {
961 target = newval->value; 1014 target = newval->value;
@@ -965,10 +1018,10 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
965 return ret; 1018 return ret;
966} 1019}
967 1020
968int bond_option_arp_validate_set(struct bonding *bond, 1021static int bond_option_arp_validate_set(struct bonding *bond,
969 struct bond_opt_value *newval) 1022 const struct bond_opt_value *newval)
970{ 1023{
971 pr_info("%s: setting arp_validate to %s (%llu).\n", 1024 pr_info("%s: Setting arp_validate to %s (%llu)\n",
972 bond->dev->name, newval->string, newval->value); 1025 bond->dev->name, newval->string, newval->value);
973 1026
974 if (bond->dev->flags & IFF_UP) { 1027 if (bond->dev->flags & IFF_UP) {
@@ -982,17 +1035,18 @@ int bond_option_arp_validate_set(struct bonding *bond,
982 return 0; 1035 return 0;
983} 1036}
984 1037
985int bond_option_arp_all_targets_set(struct bonding *bond, 1038static int bond_option_arp_all_targets_set(struct bonding *bond,
986 struct bond_opt_value *newval) 1039 const struct bond_opt_value *newval)
987{ 1040{
988 pr_info("%s: setting arp_all_targets to %s (%llu).\n", 1041 pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
989 bond->dev->name, newval->string, newval->value); 1042 bond->dev->name, newval->string, newval->value);
990 bond->params.arp_all_targets = newval->value; 1043 bond->params.arp_all_targets = newval->value;
991 1044
992 return 0; 1045 return 0;
993} 1046}
994 1047
995int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval) 1048static int bond_option_primary_set(struct bonding *bond,
1049 const struct bond_opt_value *newval)
996{ 1050{
997 char *p, *primary = newval->string; 1051 char *p, *primary = newval->string;
998 struct list_head *iter; 1052 struct list_head *iter;
@@ -1007,8 +1061,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
1007 *p = '\0'; 1061 *p = '\0';
1008 /* check to see if we are clearing primary */ 1062 /* check to see if we are clearing primary */
1009 if (!strlen(primary)) { 1063 if (!strlen(primary)) {
1010 pr_info("%s: Setting primary slave to None.\n", 1064 pr_info("%s: Setting primary slave to None\n", bond->dev->name);
1011 bond->dev->name);
1012 bond->primary_slave = NULL; 1065 bond->primary_slave = NULL;
1013 memset(bond->params.primary, 0, sizeof(bond->params.primary)); 1066 memset(bond->params.primary, 0, sizeof(bond->params.primary));
1014 bond_select_active_slave(bond); 1067 bond_select_active_slave(bond);
@@ -1017,7 +1070,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
1017 1070
1018 bond_for_each_slave(bond, slave, iter) { 1071 bond_for_each_slave(bond, slave, iter) {
1019 if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) { 1072 if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
1020 pr_info("%s: Setting %s as primary slave.\n", 1073 pr_info("%s: Setting %s as primary slave\n",
1021 bond->dev->name, slave->dev->name); 1074 bond->dev->name, slave->dev->name);
1022 bond->primary_slave = slave; 1075 bond->primary_slave = slave;
1023 strcpy(bond->params.primary, slave->dev->name); 1076 strcpy(bond->params.primary, slave->dev->name);
@@ -1027,15 +1080,14 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
1027 } 1080 }
1028 1081
1029 if (bond->primary_slave) { 1082 if (bond->primary_slave) {
1030 pr_info("%s: Setting primary slave to None.\n", 1083 pr_info("%s: Setting primary slave to None\n", bond->dev->name);
1031 bond->dev->name);
1032 bond->primary_slave = NULL; 1084 bond->primary_slave = NULL;
1033 bond_select_active_slave(bond); 1085 bond_select_active_slave(bond);
1034 } 1086 }
1035 strncpy(bond->params.primary, primary, IFNAMSIZ); 1087 strncpy(bond->params.primary, primary, IFNAMSIZ);
1036 bond->params.primary[IFNAMSIZ - 1] = 0; 1088 bond->params.primary[IFNAMSIZ - 1] = 0;
1037 1089
1038 pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n", 1090 pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
1039 bond->dev->name, primary, bond->dev->name); 1091 bond->dev->name, primary, bond->dev->name);
1040 1092
1041out: 1093out:
@@ -1046,10 +1098,10 @@ out:
1046 return 0; 1098 return 0;
1047} 1099}
1048 1100
1049int bond_option_primary_reselect_set(struct bonding *bond, 1101static int bond_option_primary_reselect_set(struct bonding *bond,
1050 struct bond_opt_value *newval) 1102 const struct bond_opt_value *newval)
1051{ 1103{
1052 pr_info("%s: setting primary_reselect to %s (%llu).\n", 1104 pr_info("%s: Setting primary_reselect to %s (%llu)\n",
1053 bond->dev->name, newval->string, newval->value); 1105 bond->dev->name, newval->string, newval->value);
1054 bond->params.primary_reselect = newval->value; 1106 bond->params.primary_reselect = newval->value;
1055 1107
@@ -1062,46 +1114,46 @@ int bond_option_primary_reselect_set(struct bonding *bond,
1062 return 0; 1114 return 0;
1063} 1115}
1064 1116
1065int bond_option_fail_over_mac_set(struct bonding *bond, 1117static int bond_option_fail_over_mac_set(struct bonding *bond,
1066 struct bond_opt_value *newval) 1118 const struct bond_opt_value *newval)
1067{ 1119{
1068 pr_info("%s: Setting fail_over_mac to %s (%llu).\n", 1120 pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
1069 bond->dev->name, newval->string, newval->value); 1121 bond->dev->name, newval->string, newval->value);
1070 bond->params.fail_over_mac = newval->value; 1122 bond->params.fail_over_mac = newval->value;
1071 1123
1072 return 0; 1124 return 0;
1073} 1125}
1074 1126
1075int bond_option_xmit_hash_policy_set(struct bonding *bond, 1127static int bond_option_xmit_hash_policy_set(struct bonding *bond,
1076 struct bond_opt_value *newval) 1128 const struct bond_opt_value *newval)
1077{ 1129{
1078 pr_info("%s: setting xmit hash policy to %s (%llu).\n", 1130 pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
1079 bond->dev->name, newval->string, newval->value); 1131 bond->dev->name, newval->string, newval->value);
1080 bond->params.xmit_policy = newval->value; 1132 bond->params.xmit_policy = newval->value;
1081 1133
1082 return 0; 1134 return 0;
1083} 1135}
1084 1136
1085int bond_option_resend_igmp_set(struct bonding *bond, 1137static int bond_option_resend_igmp_set(struct bonding *bond,
1086 struct bond_opt_value *newval) 1138 const struct bond_opt_value *newval)
1087{ 1139{
1088 pr_info("%s: Setting resend_igmp to %llu.\n", 1140 pr_info("%s: Setting resend_igmp to %llu\n",
1089 bond->dev->name, newval->value); 1141 bond->dev->name, newval->value);
1090 bond->params.resend_igmp = newval->value; 1142 bond->params.resend_igmp = newval->value;
1091 1143
1092 return 0; 1144 return 0;
1093} 1145}
1094 1146
1095int bond_option_num_peer_notif_set(struct bonding *bond, 1147static int bond_option_num_peer_notif_set(struct bonding *bond,
1096 struct bond_opt_value *newval) 1148 const struct bond_opt_value *newval)
1097{ 1149{
1098 bond->params.num_peer_notif = newval->value; 1150 bond->params.num_peer_notif = newval->value;
1099 1151
1100 return 0; 1152 return 0;
1101} 1153}
1102 1154
1103int bond_option_all_slaves_active_set(struct bonding *bond, 1155static int bond_option_all_slaves_active_set(struct bonding *bond,
1104 struct bond_opt_value *newval) 1156 const struct bond_opt_value *newval)
1105{ 1157{
1106 struct list_head *iter; 1158 struct list_head *iter;
1107 struct slave *slave; 1159 struct slave *slave;
@@ -1121,8 +1173,8 @@ int bond_option_all_slaves_active_set(struct bonding *bond,
1121 return 0; 1173 return 0;
1122} 1174}
1123 1175
1124int bond_option_min_links_set(struct bonding *bond, 1176static int bond_option_min_links_set(struct bonding *bond,
1125 struct bond_opt_value *newval) 1177 const struct bond_opt_value *newval)
1126{ 1178{
1127 pr_info("%s: Setting min links value to %llu\n", 1179 pr_info("%s: Setting min links value to %llu\n",
1128 bond->dev->name, newval->value); 1180 bond->dev->name, newval->value);
@@ -1131,15 +1183,16 @@ int bond_option_min_links_set(struct bonding *bond,
1131 return 0; 1183 return 0;
1132} 1184}
1133 1185
1134int bond_option_lp_interval_set(struct bonding *bond, 1186static int bond_option_lp_interval_set(struct bonding *bond,
1135 struct bond_opt_value *newval) 1187 const struct bond_opt_value *newval)
1136{ 1188{
1137 bond->params.lp_interval = newval->value; 1189 bond->params.lp_interval = newval->value;
1138 1190
1139 return 0; 1191 return 0;
1140} 1192}
1141 1193
1142int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval) 1194static int bond_option_pps_set(struct bonding *bond,
1195 const struct bond_opt_value *newval)
1143{ 1196{
1144 bond->params.packets_per_slave = newval->value; 1197 bond->params.packets_per_slave = newval->value;
1145 if (newval->value > 0) { 1198 if (newval->value > 0) {
@@ -1156,10 +1209,10 @@ int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
1156 return 0; 1209 return 0;
1157} 1210}
1158 1211
1159int bond_option_lacp_rate_set(struct bonding *bond, 1212static int bond_option_lacp_rate_set(struct bonding *bond,
1160 struct bond_opt_value *newval) 1213 const struct bond_opt_value *newval)
1161{ 1214{
1162 pr_info("%s: Setting LACP rate to %s (%llu).\n", 1215 pr_info("%s: Setting LACP rate to %s (%llu)\n",
1163 bond->dev->name, newval->string, newval->value); 1216 bond->dev->name, newval->string, newval->value);
1164 bond->params.lacp_fast = newval->value; 1217 bond->params.lacp_fast = newval->value;
1165 bond_3ad_update_lacp_rate(bond); 1218 bond_3ad_update_lacp_rate(bond);
@@ -1167,18 +1220,18 @@ int bond_option_lacp_rate_set(struct bonding *bond,
1167 return 0; 1220 return 0;
1168} 1221}
1169 1222
1170int bond_option_ad_select_set(struct bonding *bond, 1223static int bond_option_ad_select_set(struct bonding *bond,
1171 struct bond_opt_value *newval) 1224 const struct bond_opt_value *newval)
1172{ 1225{
1173 pr_info("%s: Setting ad_select to %s (%llu).\n", 1226 pr_info("%s: Setting ad_select to %s (%llu)\n",
1174 bond->dev->name, newval->string, newval->value); 1227 bond->dev->name, newval->string, newval->value);
1175 bond->params.ad_select = newval->value; 1228 bond->params.ad_select = newval->value;
1176 1229
1177 return 0; 1230 return 0;
1178} 1231}
1179 1232
1180int bond_option_queue_id_set(struct bonding *bond, 1233static int bond_option_queue_id_set(struct bonding *bond,
1181 struct bond_opt_value *newval) 1234 const struct bond_opt_value *newval)
1182{ 1235{
1183 struct slave *slave, *update_slave; 1236 struct slave *slave, *update_slave;
1184 struct net_device *sdev; 1237 struct net_device *sdev;
@@ -1200,8 +1253,7 @@ int bond_option_queue_id_set(struct bonding *bond,
1200 goto err_no_cmd; 1253 goto err_no_cmd;
1201 1254
1202 /* Check buffer length, valid ifname and queue id */ 1255 /* Check buffer length, valid ifname and queue id */
1203 if (strlen(newval->string) > IFNAMSIZ || 1256 if (!dev_valid_name(newval->string) ||
1204 !dev_valid_name(newval->string) ||
1205 qid > bond->dev->real_num_tx_queues) 1257 qid > bond->dev->real_num_tx_queues)
1206 goto err_no_cmd; 1258 goto err_no_cmd;
1207 1259
@@ -1233,14 +1285,14 @@ out:
1233 return ret; 1285 return ret;
1234 1286
1235err_no_cmd: 1287err_no_cmd:
1236 pr_info("invalid input for queue_id set for %s.\n", 1288 pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
1237 bond->dev->name);
1238 ret = -EPERM; 1289 ret = -EPERM;
1239 goto out; 1290 goto out;
1240 1291
1241} 1292}
1242 1293
1243int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval) 1294static int bond_option_slaves_set(struct bonding *bond,
1295 const struct bond_opt_value *newval)
1244{ 1296{
1245 char command[IFNAMSIZ + 1] = { 0, }; 1297 char command[IFNAMSIZ + 1] = { 0, };
1246 struct net_device *dev; 1298 struct net_device *dev;
@@ -1255,7 +1307,7 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
1255 1307
1256 dev = __dev_get_by_name(dev_net(bond->dev), ifname); 1308 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
1257 if (!dev) { 1309 if (!dev) {
1258 pr_info("%s: Interface %s does not exist!\n", 1310 pr_info("%s: interface %s does not exist!\n",
1259 bond->dev->name, ifname); 1311 bond->dev->name, ifname);
1260 ret = -ENODEV; 1312 ret = -ENODEV;
1261 goto out; 1313 goto out;
@@ -1263,12 +1315,12 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
1263 1315
1264 switch (command[0]) { 1316 switch (command[0]) {
1265 case '+': 1317 case '+':
1266 pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name); 1318 pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
1267 ret = bond_enslave(bond->dev, dev); 1319 ret = bond_enslave(bond->dev, dev);
1268 break; 1320 break;
1269 1321
1270 case '-': 1322 case '-':
1271 pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name); 1323 pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
1272 ret = bond_release(bond->dev, dev); 1324 ret = bond_release(bond->dev, dev);
1273 break; 1325 break;
1274 1326
@@ -1280,7 +1332,7 @@ out:
1280 return ret; 1332 return ret;
1281 1333
1282err_no_cmd: 1334err_no_cmd:
1283 pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n", 1335 pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
1284 bond->dev->name); 1336 bond->dev->name);
1285 ret = -EPERM; 1337 ret = -EPERM;
1286 goto out; 1338 goto out;
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 433d37f6940b..12be9e1bfb0c 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -81,8 +81,8 @@ struct bonding;
81 81
82struct bond_option { 82struct bond_option {
83 int id; 83 int id;
84 char *name; 84 const char *name;
85 char *desc; 85 const char *desc;
86 u32 flags; 86 u32 flags;
87 87
88 /* unsuppmodes is used to denote modes in which the option isn't 88 /* unsuppmodes is used to denote modes in which the option isn't
@@ -92,18 +92,19 @@ struct bond_option {
92 /* supported values which this option can have, can be a subset of 92 /* supported values which this option can have, can be a subset of
93 * BOND_OPTVAL_RANGE's value range 93 * BOND_OPTVAL_RANGE's value range
94 */ 94 */
95 struct bond_opt_value *values; 95 const struct bond_opt_value *values;
96 96
97 int (*set)(struct bonding *bond, struct bond_opt_value *val); 97 int (*set)(struct bonding *bond, const struct bond_opt_value *val);
98}; 98};
99 99
100int __bond_opt_set(struct bonding *bond, unsigned int option, 100int __bond_opt_set(struct bonding *bond, unsigned int option,
101 struct bond_opt_value *val); 101 struct bond_opt_value *val);
102int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); 102int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
103struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, 103
104 struct bond_opt_value *val); 104const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
105struct bond_option *bond_opt_get(unsigned int option); 105 struct bond_opt_value *val);
106struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); 106const struct bond_option *bond_opt_get(unsigned int option);
107const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
107 108
108/* This helper is used to initialize a bond_opt_value structure for parameter 109/* This helper is used to initialize a bond_opt_value structure for parameter
109 * passing. There should be either a valid string or value, but not both. 110 * passing. There should be either a valid string or value, but not both.
@@ -122,49 +123,6 @@ static inline void __bond_opt_init(struct bond_opt_value *optval,
122#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value) 123#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
123#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX) 124#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
124 125
125int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
126int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
127int bond_option_xmit_hash_policy_set(struct bonding *bond,
128 struct bond_opt_value *newval);
129int bond_option_arp_validate_set(struct bonding *bond,
130 struct bond_opt_value *newval);
131int bond_option_arp_all_targets_set(struct bonding *bond,
132 struct bond_opt_value *newval);
133int bond_option_fail_over_mac_set(struct bonding *bond,
134 struct bond_opt_value *newval);
135int bond_option_arp_interval_set(struct bonding *bond,
136 struct bond_opt_value *newval);
137int bond_option_arp_ip_targets_set(struct bonding *bond,
138 struct bond_opt_value *newval);
139void bond_option_arp_ip_targets_clear(struct bonding *bond); 126void bond_option_arp_ip_targets_clear(struct bonding *bond);
140int bond_option_downdelay_set(struct bonding *bond, 127
141 struct bond_opt_value *newval);
142int bond_option_updelay_set(struct bonding *bond,
143 struct bond_opt_value *newval);
144int bond_option_lacp_rate_set(struct bonding *bond,
145 struct bond_opt_value *newval);
146int bond_option_min_links_set(struct bonding *bond,
147 struct bond_opt_value *newval);
148int bond_option_ad_select_set(struct bonding *bond,
149 struct bond_opt_value *newval);
150int bond_option_num_peer_notif_set(struct bonding *bond,
151 struct bond_opt_value *newval);
152int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
153int bond_option_primary_set(struct bonding *bond,
154 struct bond_opt_value *newval);
155int bond_option_primary_reselect_set(struct bonding *bond,
156 struct bond_opt_value *newval);
157int bond_option_use_carrier_set(struct bonding *bond,
158 struct bond_opt_value *newval);
159int bond_option_active_slave_set(struct bonding *bond,
160 struct bond_opt_value *newval);
161int bond_option_queue_id_set(struct bonding *bond,
162 struct bond_opt_value *newval);
163int bond_option_all_slaves_active_set(struct bonding *bond,
164 struct bond_opt_value *newval);
165int bond_option_resend_igmp_set(struct bonding *bond,
166 struct bond_opt_value *newval);
167int bond_option_lp_interval_set(struct bonding *bond,
168 struct bond_opt_value *newval);
169int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
170#endif /* _BOND_OPTIONS_H */ 128#endif /* _BOND_OPTIONS_H */
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 3ac20e78eafc..013fdd0f45e9 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -65,13 +65,11 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
65static void bond_info_show_master(struct seq_file *seq) 65static void bond_info_show_master(struct seq_file *seq)
66{ 66{
67 struct bonding *bond = seq->private; 67 struct bonding *bond = seq->private;
68 struct bond_opt_value *optval; 68 const struct bond_opt_value *optval;
69 struct slave *curr; 69 struct slave *curr;
70 int i; 70 int i;
71 71
72 read_lock(&bond->curr_slave_lock); 72 curr = rcu_dereference(bond->curr_active_slave);
73 curr = bond->curr_active_slave;
74 read_unlock(&bond->curr_slave_lock);
75 73
76 seq_printf(seq, "Bonding Mode: %s", 74 seq_printf(seq, "Bonding Mode: %s",
77 bond_mode_name(bond->params.mode)); 75 bond_mode_name(bond->params.mode));
@@ -254,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
254 S_IRUGO, bn->proc_dir, 252 S_IRUGO, bn->proc_dir,
255 &bond_info_fops, bond); 253 &bond_info_fops, bond);
256 if (bond->proc_entry == NULL) 254 if (bond->proc_entry == NULL)
257 pr_warning("Warning: Cannot create /proc/net/%s/%s\n", 255 pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
258 DRV_NAME, bond_dev->name); 256 DRV_NAME, bond_dev->name);
259 else 257 else
260 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); 258 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
261 } 259 }
@@ -281,8 +279,8 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
281 if (!bn->proc_dir) { 279 if (!bn->proc_dir) {
282 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); 280 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
283 if (!bn->proc_dir) 281 if (!bn->proc_dir)
284 pr_warning("Warning: cannot create /proc/net/%s\n", 282 pr_warn("Warning: Cannot create /proc/net/%s\n",
285 DRV_NAME); 283 DRV_NAME);
286 } 284 }
287} 285}
288 286
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 643fcc110299..0e8b268da0a0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -117,9 +117,9 @@ static ssize_t bonding_store_bonds(struct class *cls,
117 rv = bond_create(bn->net, ifname); 117 rv = bond_create(bn->net, ifname);
118 if (rv) { 118 if (rv) {
119 if (rv == -EEXIST) 119 if (rv == -EEXIST)
120 pr_info("%s already exists.\n", ifname); 120 pr_info("%s already exists\n", ifname);
121 else 121 else
122 pr_info("%s creation failed.\n", ifname); 122 pr_info("%s creation failed\n", ifname);
123 res = rv; 123 res = rv;
124 } 124 }
125 } else if (command[0] == '-') { 125 } else if (command[0] == '-') {
@@ -144,7 +144,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
144 return res; 144 return res;
145 145
146err_no_cmd: 146err_no_cmd:
147 pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n"); 147 pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
148 return -EPERM; 148 return -EPERM;
149} 149}
150 150
@@ -220,7 +220,7 @@ static ssize_t bonding_show_mode(struct device *d,
220 struct device_attribute *attr, char *buf) 220 struct device_attribute *attr, char *buf)
221{ 221{
222 struct bonding *bond = to_bond(d); 222 struct bonding *bond = to_bond(d);
223 struct bond_opt_value *val; 223 const struct bond_opt_value *val;
224 224
225 val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode); 225 val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
226 226
@@ -251,7 +251,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
251 char *buf) 251 char *buf)
252{ 252{
253 struct bonding *bond = to_bond(d); 253 struct bonding *bond = to_bond(d);
254 struct bond_opt_value *val; 254 const struct bond_opt_value *val;
255 255
256 val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy); 256 val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
257 257
@@ -282,7 +282,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
282 char *buf) 282 char *buf)
283{ 283{
284 struct bonding *bond = to_bond(d); 284 struct bonding *bond = to_bond(d);
285 struct bond_opt_value *val; 285 const struct bond_opt_value *val;
286 286
287 val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE, 287 val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
288 bond->params.arp_validate); 288 bond->params.arp_validate);
@@ -314,7 +314,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
314 char *buf) 314 char *buf)
315{ 315{
316 struct bonding *bond = to_bond(d); 316 struct bonding *bond = to_bond(d);
317 struct bond_opt_value *val; 317 const struct bond_opt_value *val;
318 318
319 val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS, 319 val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
320 bond->params.arp_all_targets); 320 bond->params.arp_all_targets);
@@ -348,7 +348,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
348 char *buf) 348 char *buf)
349{ 349{
350 struct bonding *bond = to_bond(d); 350 struct bonding *bond = to_bond(d);
351 struct bond_opt_value *val; 351 const struct bond_opt_value *val;
352 352
353 val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC, 353 val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
354 bond->params.fail_over_mac); 354 bond->params.fail_over_mac);
@@ -505,7 +505,7 @@ static ssize_t bonding_show_lacp(struct device *d,
505 char *buf) 505 char *buf)
506{ 506{
507 struct bonding *bond = to_bond(d); 507 struct bonding *bond = to_bond(d);
508 struct bond_opt_value *val; 508 const struct bond_opt_value *val;
509 509
510 val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast); 510 val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
511 511
@@ -558,7 +558,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
558 char *buf) 558 char *buf)
559{ 559{
560 struct bonding *bond = to_bond(d); 560 struct bonding *bond = to_bond(d);
561 struct bond_opt_value *val; 561 const struct bond_opt_value *val;
562 562
563 val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select); 563 val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
564 564
@@ -686,7 +686,7 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
686 char *buf) 686 char *buf)
687{ 687{
688 struct bonding *bond = to_bond(d); 688 struct bonding *bond = to_bond(d);
689 struct bond_opt_value *val; 689 const struct bond_opt_value *val;
690 690
691 val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT, 691 val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
692 bond->params.primary_reselect); 692 bond->params.primary_reselect);
@@ -1135,7 +1135,7 @@ int bond_create_sysfs(struct bond_net *bn)
1135 /* Is someone being kinky and naming a device bonding_master? */ 1135 /* Is someone being kinky and naming a device bonding_master? */
1136 if (__dev_get_by_name(bn->net, 1136 if (__dev_get_by_name(bn->net,
1137 class_attr_bonding_masters.attr.name)) 1137 class_attr_bonding_masters.attr.name))
1138 pr_err("network device named %s already exists in sysfs", 1138 pr_err("network device named %s already exists in sysfs\n",
1139 class_attr_bonding_masters.attr.name); 1139 class_attr_bonding_masters.attr.name);
1140 ret = 0; 1140 ret = 0;
1141 } 1141 }
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2b0fdec695f7..b8bdd0acc8f3 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -188,8 +188,9 @@ struct slave {
188 struct net_device *dev; /* first - useful for panic debug */ 188 struct net_device *dev; /* first - useful for panic debug */
189 struct bonding *bond; /* our master */ 189 struct bonding *bond; /* our master */
190 int delay; 190 int delay;
191 unsigned long jiffies; 191 /* all three in jiffies */
192 unsigned long last_arp_rx; 192 unsigned long last_link_up;
193 unsigned long last_rx;
193 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; 194 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
194 s8 link; /* one of BOND_LINK_XXXX */ 195 s8 link; /* one of BOND_LINK_XXXX */
195 s8 new_link; 196 s8 new_link;
@@ -265,6 +266,11 @@ struct bonding {
265#define bond_slave_get_rtnl(dev) \ 266#define bond_slave_get_rtnl(dev) \
266 ((struct slave *) rtnl_dereference(dev->rx_handler_data)) 267 ((struct slave *) rtnl_dereference(dev->rx_handler_data))
267 268
269struct bond_vlan_tag {
270 __be16 vlan_proto;
271 unsigned short vlan_id;
272};
273
268/** 274/**
269 * Returns NULL if the net_device does not belong to any of the bond's slaves 275 * Returns NULL if the net_device does not belong to any of the bond's slaves
270 * 276 *
@@ -292,7 +298,7 @@ static inline void bond_set_active_slave(struct slave *slave)
292{ 298{
293 if (slave->backup) { 299 if (slave->backup) {
294 slave->backup = 0; 300 slave->backup = 0;
295 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL); 301 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
296 } 302 }
297} 303}
298 304
@@ -300,7 +306,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
300{ 306{
301 if (!slave->backup) { 307 if (!slave->backup) {
302 slave->backup = 1; 308 slave->backup = 1;
303 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL); 309 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
304 } 310 }
305} 311}
306 312
@@ -312,7 +318,7 @@ static inline void bond_set_slave_state(struct slave *slave,
312 318
313 slave->backup = slave_state; 319 slave->backup = slave_state;
314 if (notify) { 320 if (notify) {
315 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL); 321 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
316 slave->should_notify = 0; 322 slave->should_notify = 0;
317 } else { 323 } else {
318 if (slave->should_notify) 324 if (slave->should_notify)
@@ -342,7 +348,7 @@ static inline void bond_slave_state_notify(struct bonding *bond)
342 348
343 bond_for_each_slave(bond, tmp, iter) { 349 bond_for_each_slave(bond, tmp, iter) {
344 if (tmp->should_notify) { 350 if (tmp->should_notify) {
345 rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL); 351 rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
346 tmp->should_notify = 0; 352 tmp->should_notify = 0;
347 } 353 }
348 } 354 }
@@ -374,6 +380,11 @@ static inline bool bond_is_active_slave(struct slave *slave)
374#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) 380#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
375#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ 381#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
376 BOND_ARP_VALIDATE_BACKUP) 382 BOND_ARP_VALIDATE_BACKUP)
383#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
384#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
385 BOND_ARP_FILTER)
386#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
387 BOND_ARP_FILTER)
377 388
378#define BOND_SLAVE_NOTIFY_NOW true 389#define BOND_SLAVE_NOTIFY_NOW true
379#define BOND_SLAVE_NOTIFY_LATER false 390#define BOND_SLAVE_NOTIFY_LATER false
@@ -384,6 +395,12 @@ static inline int slave_do_arp_validate(struct bonding *bond,
384 return bond->params.arp_validate & (1 << bond_slave_state(slave)); 395 return bond->params.arp_validate & (1 << bond_slave_state(slave));
385} 396}
386 397
398static inline int slave_do_arp_validate_only(struct bonding *bond,
399 struct slave *slave)
400{
401 return bond->params.arp_validate & BOND_ARP_FILTER;
402}
403
387/* Get the oldest arp which we've received on this slave for bond's 404/* Get the oldest arp which we've received on this slave for bond's
388 * arp_targets. 405 * arp_targets.
389 */ 406 */
@@ -403,14 +420,10 @@ static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
403static inline unsigned long slave_last_rx(struct bonding *bond, 420static inline unsigned long slave_last_rx(struct bonding *bond,
404 struct slave *slave) 421 struct slave *slave)
405{ 422{
406 if (slave_do_arp_validate(bond, slave)) { 423 if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
407 if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) 424 return slave_oldest_target_arp_rx(bond, slave);
408 return slave_oldest_target_arp_rx(bond, slave);
409 else
410 return slave->last_arp_rx;
411 }
412 425
413 return slave->dev->last_rx; 426 return slave->last_rx;
414} 427}
415 428
416#ifdef CONFIG_NET_POLL_CONTROLLER 429#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -487,8 +500,6 @@ void bond_sysfs_slave_del(struct slave *slave);
487int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 500int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
488int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); 501int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
489int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count); 502int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
490int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
491int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
492void bond_select_active_slave(struct bonding *bond); 503void bond_select_active_slave(struct bonding *bond);
493void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 504void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
494void bond_create_debugfs(void); 505void bond_create_debugfs(void);
@@ -501,8 +512,6 @@ void bond_setup(struct net_device *bond_dev);
501unsigned int bond_get_num_tx_queues(void); 512unsigned int bond_get_num_tx_queues(void);
502int bond_netlink_init(void); 513int bond_netlink_init(void);
503void bond_netlink_fini(void); 514void bond_netlink_fini(void);
504int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
505int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
506struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); 515struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
507struct net_device *bond_option_active_slave_get(struct bonding *bond); 516struct net_device *bond_option_active_slave_get(struct bonding *bond);
508const char *bond_slave_link_status(s8 link); 517const char *bond_slave_link_status(s8 link);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 88a6a5810ec6..fc73865bb83a 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -204,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
204 204
205 skb->protocol = htons(ETH_P_CAIF); 205 skb->protocol = htons(ETH_P_CAIF);
206 skb_reset_mac_header(skb); 206 skb_reset_mac_header(skb);
207 skb->dev = ser->dev;
208 debugfs_rx(ser, data, count); 207 debugfs_rx(ser, data, count);
209 /* Push received packet up the stack. */ 208 /* Push received packet up the stack. */
210 ret = netif_rx_ni(skb); 209 ret = netif_rx_ni(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 155db68e13ba..ff54c0eb2052 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -554,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
554 554
555 skb->protocol = htons(ETH_P_CAIF); 555 skb->protocol = htons(ETH_P_CAIF);
556 skb_reset_mac_header(skb); 556 skb_reset_mac_header(skb);
557 skb->dev = cfspi->ndev;
558 557
559 /* 558 /*
560 * Push received packet up the stack. 559 * Push received packet up the stack.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6efe27458116..f07fa89b5fd5 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -420,7 +420,11 @@ static void at91_chip_start(struct net_device *dev)
420 at91_transceiver_switch(priv, 1); 420 at91_transceiver_switch(priv, 1);
421 421
422 /* enable chip */ 422 /* enable chip */
423 at91_write(priv, AT91_MR, AT91_MR_CANEN); 423 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
424 reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
425 else
426 reg_mr = AT91_MR_CANEN;
427 at91_write(priv, AT91_MR, reg_mr);
424 428
425 priv->can.state = CAN_STATE_ERROR_ACTIVE; 429 priv->can.state = CAN_STATE_ERROR_ACTIVE;
426 430
@@ -1190,6 +1194,7 @@ static const struct net_device_ops at91_netdev_ops = {
1190 .ndo_open = at91_open, 1194 .ndo_open = at91_open,
1191 .ndo_stop = at91_close, 1195 .ndo_stop = at91_close,
1192 .ndo_start_xmit = at91_start_xmit, 1196 .ndo_start_xmit = at91_start_xmit,
1197 .ndo_change_mtu = can_change_mtu,
1193}; 1198};
1194 1199
1195static ssize_t at91_sysfs_show_mb0_id(struct device *dev, 1200static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
@@ -1341,7 +1346,8 @@ static int at91_can_probe(struct platform_device *pdev)
1341 priv->can.bittiming_const = &at91_bittiming_const; 1346 priv->can.bittiming_const = &at91_bittiming_const;
1342 priv->can.do_set_mode = at91_set_mode; 1347 priv->can.do_set_mode = at91_set_mode;
1343 priv->can.do_get_berr_counter = at91_get_berr_counter; 1348 priv->can.do_get_berr_counter = at91_get_berr_counter;
1344 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 1349 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
1350 CAN_CTRLMODE_LISTENONLY;
1345 priv->dev = dev; 1351 priv->dev = dev;
1346 priv->reg_base = addr; 1352 priv->reg_base = addr;
1347 priv->devtype_data = *devtype_data; 1353 priv->devtype_data = *devtype_data;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 8d2b89a12e09..543ecceb33e9 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -528,6 +528,7 @@ static const struct net_device_ops bfin_can_netdev_ops = {
528 .ndo_open = bfin_can_open, 528 .ndo_open = bfin_can_open,
529 .ndo_stop = bfin_can_close, 529 .ndo_stop = bfin_can_close,
530 .ndo_start_xmit = bfin_can_start_xmit, 530 .ndo_start_xmit = bfin_can_start_xmit,
531 .ndo_change_mtu = can_change_mtu,
531}; 532};
532 533
533static int bfin_can_probe(struct platform_device *pdev) 534static int bfin_can_probe(struct platform_device *pdev)
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 951bfede8f3d..a5c8dcfa8357 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -114,6 +114,14 @@
114 IF_COMM_CONTROL | IF_COMM_TXRQST | \ 114 IF_COMM_CONTROL | IF_COMM_TXRQST | \
115 IF_COMM_DATAA | IF_COMM_DATAB) 115 IF_COMM_DATAA | IF_COMM_DATAB)
116 116
117/* For the low buffers we clear the interrupt bit, but keep newdat */
118#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
119 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
120 IF_COMM_DATAA | IF_COMM_DATAB)
121
122/* For the high buffers we clear the interrupt bit and newdat */
123#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST)
124
117/* IFx arbitration */ 125/* IFx arbitration */
118#define IF_ARB_MSGVAL BIT(15) 126#define IF_ARB_MSGVAL BIT(15)
119#define IF_ARB_MSGXTD BIT(14) 127#define IF_ARB_MSGXTD BIT(14)
@@ -122,7 +130,6 @@
122/* IFx message control */ 130/* IFx message control */
123#define IF_MCONT_NEWDAT BIT(15) 131#define IF_MCONT_NEWDAT BIT(15)
124#define IF_MCONT_MSGLST BIT(14) 132#define IF_MCONT_MSGLST BIT(14)
125#define IF_MCONT_CLR_MSGLST (0 << 14)
126#define IF_MCONT_INTPND BIT(13) 133#define IF_MCONT_INTPND BIT(13)
127#define IF_MCONT_UMASK BIT(12) 134#define IF_MCONT_UMASK BIT(12)
128#define IF_MCONT_TXIE BIT(11) 135#define IF_MCONT_TXIE BIT(11)
@@ -133,31 +140,10 @@
133#define IF_MCONT_DLC_MASK 0xf 140#define IF_MCONT_DLC_MASK 0xf
134 141
135/* 142/*
136 * IFx register masks: 143 * Use IF1 for RX and IF2 for TX
137 * allow easy operation on 16-bit registers when the
138 * argument is 32-bit instead
139 */ 144 */
140#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF) 145#define IF_RX 0
141#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16) 146#define IF_TX 1
142
143/* message object split */
144#define C_CAN_NO_OF_OBJECTS 32
145#define C_CAN_MSG_OBJ_RX_NUM 16
146#define C_CAN_MSG_OBJ_TX_NUM 16
147
148#define C_CAN_MSG_OBJ_RX_FIRST 1
149#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
150 C_CAN_MSG_OBJ_RX_NUM - 1)
151
152#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
153#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
154 C_CAN_MSG_OBJ_TX_NUM - 1)
155
156#define C_CAN_MSG_OBJ_RX_SPLIT 9
157#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
158
159#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
160#define RECEIVE_OBJECT_BITS 0x0000ffff
161 147
162/* status interrupt */ 148/* status interrupt */
163#define STATUS_INTERRUPT 0x8000 149#define STATUS_INTERRUPT 0x8000
@@ -246,10 +232,9 @@ static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
246 C_CAN_MSG_OBJ_TX_FIRST; 232 C_CAN_MSG_OBJ_TX_FIRST;
247} 233}
248 234
249static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv) 235static inline int get_tx_echo_msg_obj(int txecho)
250{ 236{
251 return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) + 237 return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
252 C_CAN_MSG_OBJ_TX_FIRST;
253} 238}
254 239
255static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index) 240static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
@@ -366,18 +351,6 @@ static void c_can_write_msg_object(struct net_device *dev,
366 c_can_object_put(dev, iface, objno, IF_COMM_ALL); 351 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
367} 352}
368 353
369static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
370 int iface, int ctrl_mask,
371 int obj)
372{
373 struct c_can_priv *priv = netdev_priv(dev);
374
375 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
376 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
377 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
378
379}
380
381static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, 354static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
382 int iface, 355 int iface,
383 int ctrl_mask) 356 int ctrl_mask)
@@ -387,45 +360,27 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
387 360
388 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { 361 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
389 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 362 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
390 ctrl_mask & ~(IF_MCONT_MSGLST | 363 ctrl_mask & ~IF_MCONT_NEWDAT);
391 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
392 c_can_object_put(dev, iface, i, IF_COMM_CONTROL); 364 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
393 } 365 }
394} 366}
395 367
396static inline void c_can_activate_rx_msg_obj(struct net_device *dev, 368static int c_can_handle_lost_msg_obj(struct net_device *dev,
397 int iface, int ctrl_mask, 369 int iface, int objno, u32 ctrl)
398 int obj)
399{
400 struct c_can_priv *priv = netdev_priv(dev);
401
402 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
403 ctrl_mask & ~(IF_MCONT_MSGLST |
404 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
405 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
406}
407
408static void c_can_handle_lost_msg_obj(struct net_device *dev,
409 int iface, int objno)
410{ 370{
411 struct c_can_priv *priv = netdev_priv(dev);
412 struct net_device_stats *stats = &dev->stats; 371 struct net_device_stats *stats = &dev->stats;
413 struct sk_buff *skb; 372 struct c_can_priv *priv = netdev_priv(dev);
414 struct can_frame *frame; 373 struct can_frame *frame;
374 struct sk_buff *skb;
415 375
416 netdev_err(dev, "msg lost in buffer %d\n", objno); 376 ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
417 377 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
418 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 378 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
419
420 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
421 IF_MCONT_CLR_MSGLST);
422
423 c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
424 379
425 /* create an error msg */ 380 /* create an error msg */
426 skb = alloc_can_err_skb(dev, &frame); 381 skb = alloc_can_err_skb(dev, &frame);
427 if (unlikely(!skb)) 382 if (unlikely(!skb))
428 return; 383 return 0;
429 384
430 frame->can_id |= CAN_ERR_CRTL; 385 frame->can_id |= CAN_ERR_CRTL;
431 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 386 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -433,6 +388,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
433 stats->rx_over_errors++; 388 stats->rx_over_errors++;
434 389
435 netif_receive_skb(skb); 390 netif_receive_skb(skb);
391 return 1;
436} 392}
437 393
438static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) 394static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
@@ -477,9 +433,6 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
477 433
478 stats->rx_packets++; 434 stats->rx_packets++;
479 stats->rx_bytes += frame->can_dlc; 435 stats->rx_bytes += frame->can_dlc;
480
481 can_led_event(dev, CAN_LED_EVENT_RX);
482
483 return 0; 436 return 0;
484} 437}
485 438
@@ -548,10 +501,12 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
548 if (can_dropped_invalid_skb(dev, skb)) 501 if (can_dropped_invalid_skb(dev, skb))
549 return NETDEV_TX_OK; 502 return NETDEV_TX_OK;
550 503
504 spin_lock_bh(&priv->xmit_lock);
551 msg_obj_no = get_tx_next_msg_obj(priv); 505 msg_obj_no = get_tx_next_msg_obj(priv);
552 506
553 /* prepare message object for transmission */ 507 /* prepare message object for transmission */
554 c_can_write_msg_object(dev, 0, frame, msg_obj_no); 508 c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
509 priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
555 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 510 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
556 511
557 /* 512 /*
@@ -562,10 +517,26 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
562 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) || 517 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
563 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0) 518 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
564 netif_stop_queue(dev); 519 netif_stop_queue(dev);
520 spin_unlock_bh(&priv->xmit_lock);
565 521
566 return NETDEV_TX_OK; 522 return NETDEV_TX_OK;
567} 523}
568 524
525static int c_can_wait_for_ctrl_init(struct net_device *dev,
526 struct c_can_priv *priv, u32 init)
527{
528 int retry = 0;
529
530 while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
531 udelay(10);
532 if (retry++ > 1000) {
533 netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
534 return -EIO;
535 }
536 }
537 return 0;
538}
539
569static int c_can_set_bittiming(struct net_device *dev) 540static int c_can_set_bittiming(struct net_device *dev)
570{ 541{
571 unsigned int reg_btr, reg_brpe, ctrl_save; 542 unsigned int reg_btr, reg_brpe, ctrl_save;
@@ -573,6 +544,7 @@ static int c_can_set_bittiming(struct net_device *dev)
573 u32 ten_bit_brp; 544 u32 ten_bit_brp;
574 struct c_can_priv *priv = netdev_priv(dev); 545 struct c_can_priv *priv = netdev_priv(dev);
575 const struct can_bittiming *bt = &priv->can.bittiming; 546 const struct can_bittiming *bt = &priv->can.bittiming;
547 int res;
576 548
577 /* c_can provides a 6-bit brp and 4-bit brpe fields */ 549 /* c_can provides a 6-bit brp and 4-bit brpe fields */
578 ten_bit_brp = bt->brp - 1; 550 ten_bit_brp = bt->brp - 1;
@@ -590,13 +562,17 @@ static int c_can_set_bittiming(struct net_device *dev)
590 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); 562 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
591 563
592 ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG); 564 ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
593 priv->write_reg(priv, C_CAN_CTRL_REG, 565 ctrl_save &= ~CONTROL_INIT;
594 ctrl_save | CONTROL_CCE | CONTROL_INIT); 566 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
567 res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
568 if (res)
569 return res;
570
595 priv->write_reg(priv, C_CAN_BTR_REG, reg_btr); 571 priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
596 priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe); 572 priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
597 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save); 573 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
598 574
599 return 0; 575 return c_can_wait_for_ctrl_init(dev, priv, 0);
600} 576}
601 577
602/* 578/*
@@ -614,14 +590,14 @@ static void c_can_configure_msg_objects(struct net_device *dev)
614 590
615 /* first invalidate all message objects */ 591 /* first invalidate all message objects */
616 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++) 592 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
617 c_can_inval_msg_object(dev, 0, i); 593 c_can_inval_msg_object(dev, IF_RX, i);
618 594
619 /* setup receive message objects */ 595 /* setup receive message objects */
620 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) 596 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
621 c_can_setup_receive_object(dev, 0, i, 0, 0, 597 c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
622 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB); 598 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
623 599
624 c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0, 600 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
625 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK); 601 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
626} 602}
627 603
@@ -631,7 +607,7 @@ static void c_can_configure_msg_objects(struct net_device *dev)
631 * - set operating mode 607 * - set operating mode
632 * - configure message objects 608 * - configure message objects
633 */ 609 */
634static void c_can_chip_config(struct net_device *dev) 610static int c_can_chip_config(struct net_device *dev)
635{ 611{
636 struct c_can_priv *priv = netdev_priv(dev); 612 struct c_can_priv *priv = netdev_priv(dev);
637 613
@@ -668,15 +644,18 @@ static void c_can_chip_config(struct net_device *dev)
668 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 644 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
669 645
670 /* set bittiming params */ 646 /* set bittiming params */
671 c_can_set_bittiming(dev); 647 return c_can_set_bittiming(dev);
672} 648}
673 649
674static void c_can_start(struct net_device *dev) 650static int c_can_start(struct net_device *dev)
675{ 651{
676 struct c_can_priv *priv = netdev_priv(dev); 652 struct c_can_priv *priv = netdev_priv(dev);
653 int err;
677 654
678 /* basic c_can configuration */ 655 /* basic c_can configuration */
679 c_can_chip_config(dev); 656 err = c_can_chip_config(dev);
657 if (err)
658 return err;
680 659
681 priv->can.state = CAN_STATE_ERROR_ACTIVE; 660 priv->can.state = CAN_STATE_ERROR_ACTIVE;
682 661
@@ -685,6 +664,8 @@ static void c_can_start(struct net_device *dev)
685 664
686 /* enable status change, error and module interrupts */ 665 /* enable status change, error and module interrupts */
687 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); 666 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
667
668 return 0;
688} 669}
689 670
690static void c_can_stop(struct net_device *dev) 671static void c_can_stop(struct net_device *dev)
@@ -700,9 +681,13 @@ static void c_can_stop(struct net_device *dev)
700 681
701static int c_can_set_mode(struct net_device *dev, enum can_mode mode) 682static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
702{ 683{
684 int err;
685
703 switch (mode) { 686 switch (mode) {
704 case CAN_MODE_START: 687 case CAN_MODE_START:
705 c_can_start(dev); 688 err = c_can_start(dev);
689 if (err)
690 return err;
706 netif_wake_queue(dev); 691 netif_wake_queue(dev);
707 break; 692 break;
708 default: 693 default:
@@ -740,8 +725,6 @@ static int c_can_get_berr_counter(const struct net_device *dev,
740} 725}
741 726
742/* 727/*
743 * theory of operation:
744 *
745 * priv->tx_echo holds the number of the oldest can_frame put for 728 * priv->tx_echo holds the number of the oldest can_frame put for
746 * transmission into the hardware, but not yet ACKed by the CAN tx 729 * transmission into the hardware, but not yet ACKed by the CAN tx
747 * complete IRQ. 730 * complete IRQ.
@@ -752,33 +735,113 @@ static int c_can_get_berr_counter(const struct net_device *dev,
752 */ 735 */
753static void c_can_do_tx(struct net_device *dev) 736static void c_can_do_tx(struct net_device *dev)
754{ 737{
755 u32 val;
756 u32 msg_obj_no;
757 struct c_can_priv *priv = netdev_priv(dev); 738 struct c_can_priv *priv = netdev_priv(dev);
758 struct net_device_stats *stats = &dev->stats; 739 struct net_device_stats *stats = &dev->stats;
740 u32 val, obj, pkts = 0, bytes = 0;
759 741
760 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 742 spin_lock_bh(&priv->xmit_lock);
761 msg_obj_no = get_tx_echo_msg_obj(priv); 743
744 for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
745 obj = get_tx_echo_msg_obj(priv->tx_echo);
762 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG); 746 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
763 if (!(val & (1 << (msg_obj_no - 1)))) { 747
764 can_get_echo_skb(dev, 748 if (val & (1 << (obj - 1)))
765 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
766 c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
767 stats->tx_bytes += priv->read_reg(priv,
768 C_CAN_IFACE(MSGCTRL_REG, 0))
769 & IF_MCONT_DLC_MASK;
770 stats->tx_packets++;
771 can_led_event(dev, CAN_LED_EVENT_TX);
772 c_can_inval_msg_object(dev, 0, msg_obj_no);
773 } else {
774 break; 749 break;
775 } 750
751 can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
752 bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
753 pkts++;
754 c_can_inval_msg_object(dev, IF_TX, obj);
776 } 755 }
777 756
778 /* restart queue if wrap-up or if queue stalled on last pkt */ 757 /* restart queue if wrap-up or if queue stalled on last pkt */
779 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) || 758 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
780 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0)) 759 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
781 netif_wake_queue(dev); 760 netif_wake_queue(dev);
761
762 spin_unlock_bh(&priv->xmit_lock);
763
764 if (pkts) {
765 stats->tx_bytes += bytes;
766 stats->tx_packets += pkts;
767 can_led_event(dev, CAN_LED_EVENT_TX);
768 }
769}
770
771/*
772 * If we have a gap in the pending bits, that means we either
773 * raced with the hardware or failed to readout all upper
774 * objects in the last run due to quota limit.
775 */
776static u32 c_can_adjust_pending(u32 pend)
777{
778 u32 weight, lasts;
779
780 if (pend == RECEIVE_OBJECT_BITS)
781 return pend;
782
783 /*
784 * If the last set bit is larger than the number of pending
785 * bits we have a gap.
786 */
787 weight = hweight32(pend);
788 lasts = fls(pend);
789
790 /* If the bits are linear, nothing to do */
791 if (lasts == weight)
792 return pend;
793
794 /*
795 * Find the first set bit after the gap. We walk backwards
796 * from the last set bit.
797 */
798 for (lasts--; pend & (1 << (lasts - 1)); lasts--);
799
800 return pend & ~((1 << lasts) - 1);
801}
802
803static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
804 u32 pend, int quota)
805{
806 u32 pkts = 0, ctrl, obj, mcmd;
807
808 while ((obj = ffs(pend)) && quota > 0) {
809 pend &= ~BIT(obj - 1);
810
811 mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
812 IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
813
814 c_can_object_get(dev, IF_RX, obj, mcmd);
815 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
816
817 if (ctrl & IF_MCONT_MSGLST) {
818 int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);
819
820 pkts += n;
821 quota -= n;
822 continue;
823 }
824
825 /*
826 * This really should not happen, but this covers some
827 * odd HW behaviour. Do not remove that unless you
828 * want to brick your machine.
829 */
830 if (!(ctrl & IF_MCONT_NEWDAT))
831 continue;
832
833 /* read the data from the message object */
834 c_can_read_msg_object(dev, IF_RX, ctrl);
835
836 if (obj == C_CAN_MSG_RX_LOW_LAST)
837 /* activate all lower message objects */
838 c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
839
840 pkts++;
841 quota--;
842 }
843
844 return pkts;
782} 845}
783 846
784/* 847/*
@@ -805,10 +868,8 @@ static void c_can_do_tx(struct net_device *dev)
805 */ 868 */
806static int c_can_do_rx_poll(struct net_device *dev, int quota) 869static int c_can_do_rx_poll(struct net_device *dev, int quota)
807{ 870{
808 u32 num_rx_pkts = 0;
809 unsigned int msg_obj, msg_ctrl_save;
810 struct c_can_priv *priv = netdev_priv(dev); 871 struct c_can_priv *priv = netdev_priv(dev);
811 u16 val; 872 u32 pkts = 0, pend = 0, toread, n;
812 873
813 /* 874 /*
814 * It is faster to read only one 16bit register. This is only possible 875 * It is faster to read only one 16bit register. This is only possible
@@ -817,49 +878,31 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
817 BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16, 878 BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
818 "Implementation does not support more message objects than 16"); 879 "Implementation does not support more message objects than 16");
819 880
820 while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) { 881 while (quota > 0) {
821 while ((msg_obj = ffs(val)) && quota > 0) { 882 if (!pend) {
822 val &= ~BIT(msg_obj - 1); 883 pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
823 884 if (!pend)
824 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL & 885 break;
825 ~IF_COMM_TXRQST); 886 /*
826 msg_ctrl_save = priv->read_reg(priv, 887 * If the pending field has a gap, handle the
827 C_CAN_IFACE(MSGCTRL_REG, 0)); 888 * bits above the gap first.
828 889 */
829 if (msg_ctrl_save & IF_MCONT_MSGLST) { 890 toread = c_can_adjust_pending(pend);
830 c_can_handle_lost_msg_obj(dev, 0, msg_obj); 891 } else {
831 num_rx_pkts++; 892 toread = pend;
832 quota--;
833 continue;
834 }
835
836 if (msg_ctrl_save & IF_MCONT_EOB)
837 return num_rx_pkts;
838
839 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
840 continue;
841
842 /* read the data from the message object */
843 c_can_read_msg_object(dev, 0, msg_ctrl_save);
844
845 if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
846 c_can_mark_rx_msg_obj(dev, 0,
847 msg_ctrl_save, msg_obj);
848 else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
849 /* activate this msg obj */
850 c_can_activate_rx_msg_obj(dev, 0,
851 msg_ctrl_save, msg_obj);
852 else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
853 /* activate all lower message objects */
854 c_can_activate_all_lower_rx_msg_obj(dev,
855 0, msg_ctrl_save);
856
857 num_rx_pkts++;
858 quota--;
859 } 893 }
894 /* Remove the bits from pend */
895 pend &= ~toread;
896 /* Read the objects */
897 n = c_can_read_objects(dev, priv, toread, quota);
898 pkts += n;
899 quota -= n;
860 } 900 }
861 901
862 return num_rx_pkts; 902 if (pkts)
903 can_led_event(dev, CAN_LED_EVENT_RX);
904
905 return pkts;
863} 906}
864 907
865static inline int c_can_has_and_handle_berr(struct c_can_priv *priv) 908static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
@@ -1133,17 +1176,20 @@ static int c_can_open(struct net_device *dev)
1133 goto exit_irq_fail; 1176 goto exit_irq_fail;
1134 } 1177 }
1135 1178
1136 napi_enable(&priv->napi); 1179 /* start the c_can controller */
1180 err = c_can_start(dev);
1181 if (err)
1182 goto exit_start_fail;
1137 1183
1138 can_led_event(dev, CAN_LED_EVENT_OPEN); 1184 can_led_event(dev, CAN_LED_EVENT_OPEN);
1139 1185
1140 /* start the c_can controller */ 1186 napi_enable(&priv->napi);
1141 c_can_start(dev);
1142
1143 netif_start_queue(dev); 1187 netif_start_queue(dev);
1144 1188
1145 return 0; 1189 return 0;
1146 1190
1191exit_start_fail:
1192 free_irq(dev->irq, dev);
1147exit_irq_fail: 1193exit_irq_fail:
1148 close_candev(dev); 1194 close_candev(dev);
1149exit_open_fail: 1195exit_open_fail:
@@ -1180,6 +1226,7 @@ struct net_device *alloc_c_can_dev(void)
1180 return NULL; 1226 return NULL;
1181 1227
1182 priv = netdev_priv(dev); 1228 priv = netdev_priv(dev);
1229 spin_lock_init(&priv->xmit_lock);
1183 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); 1230 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1184 1231
1185 priv->dev = dev; 1232 priv->dev = dev;
@@ -1260,15 +1307,16 @@ int c_can_power_up(struct net_device *dev)
1260 if (time_after(jiffies, time_out)) 1307 if (time_after(jiffies, time_out))
1261 return -ETIMEDOUT; 1308 return -ETIMEDOUT;
1262 1309
1263 c_can_start(dev); 1310 return c_can_start(dev);
1264
1265 return 0;
1266} 1311}
1267EXPORT_SYMBOL_GPL(c_can_power_up); 1312EXPORT_SYMBOL_GPL(c_can_power_up);
1268#endif 1313#endif
1269 1314
1270void free_c_can_dev(struct net_device *dev) 1315void free_c_can_dev(struct net_device *dev)
1271{ 1316{
1317 struct c_can_priv *priv = netdev_priv(dev);
1318
1319 netif_napi_del(&priv->napi);
1272 free_candev(dev); 1320 free_candev(dev);
1273} 1321}
1274EXPORT_SYMBOL_GPL(free_c_can_dev); 1322EXPORT_SYMBOL_GPL(free_c_can_dev);
@@ -1277,6 +1325,7 @@ static const struct net_device_ops c_can_netdev_ops = {
1277 .ndo_open = c_can_open, 1325 .ndo_open = c_can_open,
1278 .ndo_stop = c_can_close, 1326 .ndo_stop = c_can_close,
1279 .ndo_start_xmit = c_can_start_xmit, 1327 .ndo_start_xmit = c_can_start_xmit,
1328 .ndo_change_mtu = can_change_mtu,
1280}; 1329};
1281 1330
1282int register_c_can_dev(struct net_device *dev) 1331int register_c_can_dev(struct net_device *dev)
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index d2e1c21b143f..faa8404162b3 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,6 +22,33 @@
22#ifndef C_CAN_H 22#ifndef C_CAN_H
23#define C_CAN_H 23#define C_CAN_H
24 24
25/*
26 * IFx register masks:
27 * allow easy operation on 16-bit registers when the
28 * argument is 32-bit instead
29 */
30#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
31#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
32
33/* message object split */
34#define C_CAN_NO_OF_OBJECTS 32
35#define C_CAN_MSG_OBJ_RX_NUM 16
36#define C_CAN_MSG_OBJ_TX_NUM 16
37
38#define C_CAN_MSG_OBJ_RX_FIRST 1
39#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
40 C_CAN_MSG_OBJ_RX_NUM - 1)
41
42#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
43#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
44 C_CAN_MSG_OBJ_TX_NUM - 1)
45
46#define C_CAN_MSG_OBJ_RX_SPLIT 9
47#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
48
49#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
50#define RECEIVE_OBJECT_BITS 0x0000ffff
51
25enum reg { 52enum reg {
26 C_CAN_CTRL_REG = 0, 53 C_CAN_CTRL_REG = 0,
27 C_CAN_CTRL_EX_REG, 54 C_CAN_CTRL_EX_REG,
@@ -156,6 +183,7 @@ struct c_can_priv {
156 struct napi_struct napi; 183 struct napi_struct napi;
157 struct net_device *dev; 184 struct net_device *dev;
158 struct device *device; 185 struct device *device;
186 spinlock_t xmit_lock;
159 int tx_object; 187 int tx_object;
160 int current_status; 188 int current_status;
161 int last_status; 189 int last_status;
@@ -172,6 +200,7 @@ struct c_can_priv {
172 u32 __iomem *raminit_ctrlreg; 200 u32 __iomem *raminit_ctrlreg;
173 unsigned int instance; 201 unsigned int instance;
174 void (*raminit) (const struct c_can_priv *priv, bool enable); 202 void (*raminit) (const struct c_can_priv *priv, bool enable);
203 u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
175}; 204};
176 205
177struct net_device *alloc_c_can_dev(void); 206struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index d66ac265269c..806d92753427 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -37,8 +37,10 @@
37 37
38#include "c_can.h" 38#include "c_can.h"
39 39
40#define CAN_RAMINIT_START_MASK(i) (1 << (i)) 40#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
41 41#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
42#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
43static DEFINE_SPINLOCK(raminit_lock);
42/* 44/*
43 * 16-bit c_can registers can be arranged differently in the memory 45 * 16-bit c_can registers can be arranged differently in the memory
44 * architecture of different implementations. For example: 16-bit 46 * architecture of different implementations. For example: 16-bit
@@ -69,16 +71,41 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
69 writew(val, priv->base + 2 * priv->regs[index]); 71 writew(val, priv->base + 2 * priv->regs[index]);
70} 72}
71 73
74static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
75 u32 val)
76{
77 /* We look only at the bits of our instance. */
78 val &= mask;
79 while ((readl(priv->raminit_ctrlreg) & mask) != val)
80 udelay(1);
81}
82
72static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) 83static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
73{ 84{
74 u32 val; 85 u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
75 86 u32 ctrl;
76 val = readl(priv->raminit_ctrlreg); 87
77 if (enable) 88 spin_lock(&raminit_lock);
78 val |= CAN_RAMINIT_START_MASK(priv->instance); 89
79 else 90 ctrl = readl(priv->raminit_ctrlreg);
80 val &= ~CAN_RAMINIT_START_MASK(priv->instance); 91 /* We clear the done and start bit first. The start bit is
81 writel(val, priv->raminit_ctrlreg); 92 * looking at the 0 -> transition, but is not self clearing;
93 * And we clear the init done bit as well.
94 */
95 ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance);
96 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
97 writel(ctrl, priv->raminit_ctrlreg);
98 ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
99 c_can_hw_raminit_wait(priv, ctrl, mask);
100
101 if (enable) {
102 /* Set start bit and wait for the done bit. */
103 ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
104 writel(ctrl, priv->raminit_ctrlreg);
105 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
106 c_can_hw_raminit_wait(priv, ctrl, mask);
107 }
108 spin_unlock(&raminit_lock);
82} 109}
83 110
84static struct platform_device_id c_can_id_table[] = { 111static struct platform_device_id c_can_id_table[] = {
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 0f12abf6591c..d8379278d648 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -823,6 +823,7 @@ static const struct net_device_ops cc770_netdev_ops = {
823 .ndo_open = cc770_open, 823 .ndo_open = cc770_open,
824 .ndo_stop = cc770_close, 824 .ndo_stop = cc770_close,
825 .ndo_start_xmit = cc770_start_xmit, 825 .ndo_start_xmit = cc770_start_xmit,
826 .ndo_change_mtu = can_change_mtu,
826}; 827};
827 828
828int register_cc770dev(struct net_device *dev) 829int register_cc770dev(struct net_device *dev)
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index fc59bc6f040b..c7a260478749 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -99,10 +99,10 @@ static int can_update_spt(const struct can_bittiming_const *btc,
99 return 1000 * (tseg + 1 - *tseg2) / (tseg + 1); 99 return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
100} 100}
101 101
102static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt) 102static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
103 const struct can_bittiming_const *btc)
103{ 104{
104 struct can_priv *priv = netdev_priv(dev); 105 struct can_priv *priv = netdev_priv(dev);
105 const struct can_bittiming_const *btc = priv->bittiming_const;
106 long rate, best_rate = 0; 106 long rate, best_rate = 0;
107 long best_error = 1000000000, error = 0; 107 long best_error = 1000000000, error = 0;
108 int best_tseg = 0, best_brp = 0, brp = 0; 108 int best_tseg = 0, best_brp = 0, brp = 0;
@@ -110,9 +110,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
110 int spt_error = 1000, spt = 0, sampl_pt; 110 int spt_error = 1000, spt = 0, sampl_pt;
111 u64 v64; 111 u64 v64;
112 112
113 if (!priv->bittiming_const)
114 return -ENOTSUPP;
115
116 /* Use CIA recommended sample points */ 113 /* Use CIA recommended sample points */
117 if (bt->sample_point) { 114 if (bt->sample_point) {
118 sampl_pt = bt->sample_point; 115 sampl_pt = bt->sample_point;
@@ -204,7 +201,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
204 return 0; 201 return 0;
205} 202}
206#else /* !CONFIG_CAN_CALC_BITTIMING */ 203#else /* !CONFIG_CAN_CALC_BITTIMING */
207static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt) 204static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
205 const struct can_bittiming_const *btc)
208{ 206{
209 netdev_err(dev, "bit-timing calculation not available\n"); 207 netdev_err(dev, "bit-timing calculation not available\n");
210 return -EINVAL; 208 return -EINVAL;
@@ -217,16 +215,13 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
217 * prescaler value brp. You can find more information in the header 215 * prescaler value brp. You can find more information in the header
218 * file linux/can/netlink.h. 216 * file linux/can/netlink.h.
219 */ 217 */
220static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt) 218static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
219 const struct can_bittiming_const *btc)
221{ 220{
222 struct can_priv *priv = netdev_priv(dev); 221 struct can_priv *priv = netdev_priv(dev);
223 const struct can_bittiming_const *btc = priv->bittiming_const;
224 int tseg1, alltseg; 222 int tseg1, alltseg;
225 u64 brp64; 223 u64 brp64;
226 224
227 if (!priv->bittiming_const)
228 return -ENOTSUPP;
229
230 tseg1 = bt->prop_seg + bt->phase_seg1; 225 tseg1 = bt->prop_seg + bt->phase_seg1;
231 if (!bt->sjw) 226 if (!bt->sjw)
232 bt->sjw = 1; 227 bt->sjw = 1;
@@ -254,26 +249,29 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
254 return 0; 249 return 0;
255} 250}
256 251
257static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt) 252static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
253 const struct can_bittiming_const *btc)
258{ 254{
259 struct can_priv *priv = netdev_priv(dev);
260 int err; 255 int err;
261 256
262 /* Check if the CAN device has bit-timing parameters */ 257 /* Check if the CAN device has bit-timing parameters */
263 if (priv->bittiming_const) { 258 if (!btc)
259 return -ENOTSUPP;
264 260
265 /* Non-expert mode? Check if the bitrate has been pre-defined */ 261 /*
266 if (!bt->tq) 262 * Depending on the given can_bittiming parameter structure the CAN
267 /* Determine bit-timing parameters */ 263 * timing parameters are calculated based on the provided bitrate OR
268 err = can_calc_bittiming(dev, bt); 264 * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
269 else 265 * provided directly which are then checked and fixed up.
270 /* Check bit-timing params and calculate proper brp */ 266 */
271 err = can_fixup_bittiming(dev, bt); 267 if (!bt->tq && bt->bitrate)
272 if (err) 268 err = can_calc_bittiming(dev, bt, btc);
273 return err; 269 else if (bt->tq && !bt->bitrate)
274 } 270 err = can_fixup_bittiming(dev, bt, btc);
271 else
272 err = -EINVAL;
275 273
276 return 0; 274 return err;
277} 275}
278 276
279/* 277/*
@@ -317,7 +315,9 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
317 BUG_ON(idx >= priv->echo_skb_max); 315 BUG_ON(idx >= priv->echo_skb_max);
318 316
319 /* check flag whether this packet has to be looped back */ 317 /* check flag whether this packet has to be looped back */
320 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) { 318 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
319 (skb->protocol != htons(ETH_P_CAN) &&
320 skb->protocol != htons(ETH_P_CANFD))) {
321 kfree_skb(skb); 321 kfree_skb(skb);
322 return; 322 return;
323 } 323 }
@@ -329,7 +329,6 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
329 return; 329 return;
330 330
331 /* make settings for echo to reduce code in irq context */ 331 /* make settings for echo to reduce code in irq context */
332 skb->protocol = htons(ETH_P_CAN);
333 skb->pkt_type = PACKET_BROADCAST; 332 skb->pkt_type = PACKET_BROADCAST;
334 skb->ip_summed = CHECKSUM_UNNECESSARY; 333 skb->ip_summed = CHECKSUM_UNNECESSARY;
335 skb->dev = dev; 334 skb->dev = dev;
@@ -512,6 +511,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
512} 511}
513EXPORT_SYMBOL_GPL(alloc_can_skb); 512EXPORT_SYMBOL_GPL(alloc_can_skb);
514 513
514struct sk_buff *alloc_canfd_skb(struct net_device *dev,
515 struct canfd_frame **cfd)
516{
517 struct sk_buff *skb;
518
519 skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
520 sizeof(struct canfd_frame));
521 if (unlikely(!skb))
522 return NULL;
523
524 skb->protocol = htons(ETH_P_CANFD);
525 skb->pkt_type = PACKET_BROADCAST;
526 skb->ip_summed = CHECKSUM_UNNECESSARY;
527
528 can_skb_reserve(skb);
529 can_skb_prv(skb)->ifindex = dev->ifindex;
530
531 *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
532 memset(*cfd, 0, sizeof(struct canfd_frame));
533
534 return skb;
535}
536EXPORT_SYMBOL_GPL(alloc_canfd_skb);
537
515struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) 538struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
516{ 539{
517 struct sk_buff *skb; 540 struct sk_buff *skb;
@@ -572,6 +595,39 @@ void free_candev(struct net_device *dev)
572EXPORT_SYMBOL_GPL(free_candev); 595EXPORT_SYMBOL_GPL(free_candev);
573 596
574/* 597/*
598 * changing MTU and control mode for CAN/CANFD devices
599 */
600int can_change_mtu(struct net_device *dev, int new_mtu)
601{
602 struct can_priv *priv = netdev_priv(dev);
603
604 /* Do not allow changing the MTU while running */
605 if (dev->flags & IFF_UP)
606 return -EBUSY;
607
608 /* allow change of MTU according to the CANFD ability of the device */
609 switch (new_mtu) {
610 case CAN_MTU:
611 priv->ctrlmode &= ~CAN_CTRLMODE_FD;
612 break;
613
614 case CANFD_MTU:
615 if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
616 return -EINVAL;
617
618 priv->ctrlmode |= CAN_CTRLMODE_FD;
619 break;
620
621 default:
622 return -EINVAL;
623 }
624
625 dev->mtu = new_mtu;
626 return 0;
627}
628EXPORT_SYMBOL_GPL(can_change_mtu);
629
630/*
575 * Common open function when the device gets opened. 631 * Common open function when the device gets opened.
576 * 632 *
577 * This function should be called in the open function of the device 633 * This function should be called in the open function of the device
@@ -581,11 +637,19 @@ int open_candev(struct net_device *dev)
581{ 637{
582 struct can_priv *priv = netdev_priv(dev); 638 struct can_priv *priv = netdev_priv(dev);
583 639
584 if (!priv->bittiming.tq && !priv->bittiming.bitrate) { 640 if (!priv->bittiming.bitrate) {
585 netdev_err(dev, "bit-timing not yet defined\n"); 641 netdev_err(dev, "bit-timing not yet defined\n");
586 return -EINVAL; 642 return -EINVAL;
587 } 643 }
588 644
645 /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
646 if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
647 (!priv->data_bittiming.bitrate ||
648 (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) {
649 netdev_err(dev, "incorrect/missing data bit-timing\n");
650 return -EINVAL;
651 }
652
589 /* Switch carrier on if device was stopped while in bus-off state */ 653 /* Switch carrier on if device was stopped while in bus-off state */
590 if (!netif_carrier_ok(dev)) 654 if (!netif_carrier_ok(dev))
591 netif_carrier_on(dev); 655 netif_carrier_on(dev);
@@ -624,6 +688,10 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
624 = { .len = sizeof(struct can_bittiming_const) }, 688 = { .len = sizeof(struct can_bittiming_const) },
625 [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, 689 [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
626 [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, 690 [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
691 [IFLA_CAN_DATA_BITTIMING]
692 = { .len = sizeof(struct can_bittiming) },
693 [IFLA_CAN_DATA_BITTIMING_CONST]
694 = { .len = sizeof(struct can_bittiming_const) },
627}; 695};
628 696
629static int can_changelink(struct net_device *dev, 697static int can_changelink(struct net_device *dev,
@@ -642,9 +710,7 @@ static int can_changelink(struct net_device *dev,
642 if (dev->flags & IFF_UP) 710 if (dev->flags & IFF_UP)
643 return -EBUSY; 711 return -EBUSY;
644 memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); 712 memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
645 if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq)) 713 err = can_get_bittiming(dev, &bt, priv->bittiming_const);
646 return -EINVAL;
647 err = can_get_bittiming(dev, &bt);
648 if (err) 714 if (err)
649 return err; 715 return err;
650 memcpy(&priv->bittiming, &bt, sizeof(bt)); 716 memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -668,6 +734,12 @@ static int can_changelink(struct net_device *dev,
668 return -EOPNOTSUPP; 734 return -EOPNOTSUPP;
669 priv->ctrlmode &= ~cm->mask; 735 priv->ctrlmode &= ~cm->mask;
670 priv->ctrlmode |= cm->flags; 736 priv->ctrlmode |= cm->flags;
737
738 /* CAN_CTRLMODE_FD can only be set when driver supports FD */
739 if (priv->ctrlmode & CAN_CTRLMODE_FD)
740 dev->mtu = CANFD_MTU;
741 else
742 dev->mtu = CAN_MTU;
671 } 743 }
672 744
673 if (data[IFLA_CAN_RESTART_MS]) { 745 if (data[IFLA_CAN_RESTART_MS]) {
@@ -686,6 +758,27 @@ static int can_changelink(struct net_device *dev,
686 return err; 758 return err;
687 } 759 }
688 760
761 if (data[IFLA_CAN_DATA_BITTIMING]) {
762 struct can_bittiming dbt;
763
764 /* Do not allow changing bittiming while running */
765 if (dev->flags & IFF_UP)
766 return -EBUSY;
767 memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
768 sizeof(dbt));
769 err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
770 if (err)
771 return err;
772 memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
773
774 if (priv->do_set_data_bittiming) {
775 /* Finally, set the bit-timing registers */
776 err = priv->do_set_data_bittiming(dev);
777 if (err)
778 return err;
779 }
780 }
781
689 return 0; 782 return 0;
690} 783}
691 784
@@ -694,7 +787,8 @@ static size_t can_get_size(const struct net_device *dev)
694 struct can_priv *priv = netdev_priv(dev); 787 struct can_priv *priv = netdev_priv(dev);
695 size_t size = 0; 788 size_t size = 0;
696 789
697 size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */ 790 if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
791 size += nla_total_size(sizeof(struct can_bittiming));
698 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ 792 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
699 size += nla_total_size(sizeof(struct can_bittiming_const)); 793 size += nla_total_size(sizeof(struct can_bittiming_const));
700 size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ 794 size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
@@ -703,6 +797,10 @@ static size_t can_get_size(const struct net_device *dev)
703 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ 797 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
704 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ 798 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
705 size += nla_total_size(sizeof(struct can_berr_counter)); 799 size += nla_total_size(sizeof(struct can_berr_counter));
800 if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
801 size += nla_total_size(sizeof(struct can_bittiming));
802 if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
803 size += nla_total_size(sizeof(struct can_bittiming_const));
706 804
707 return size; 805 return size;
708} 806}
@@ -716,19 +814,34 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
716 814
717 if (priv->do_get_state) 815 if (priv->do_get_state)
718 priv->do_get_state(dev, &state); 816 priv->do_get_state(dev, &state);
719 if (nla_put(skb, IFLA_CAN_BITTIMING, 817
720 sizeof(priv->bittiming), &priv->bittiming) || 818 if ((priv->bittiming.bitrate &&
819 nla_put(skb, IFLA_CAN_BITTIMING,
820 sizeof(priv->bittiming), &priv->bittiming)) ||
821
721 (priv->bittiming_const && 822 (priv->bittiming_const &&
722 nla_put(skb, IFLA_CAN_BITTIMING_CONST, 823 nla_put(skb, IFLA_CAN_BITTIMING_CONST,
723 sizeof(*priv->bittiming_const), priv->bittiming_const)) || 824 sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
825
724 nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) || 826 nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
725 nla_put_u32(skb, IFLA_CAN_STATE, state) || 827 nla_put_u32(skb, IFLA_CAN_STATE, state) ||
726 nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || 828 nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
727 nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || 829 nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
830
728 (priv->do_get_berr_counter && 831 (priv->do_get_berr_counter &&
729 !priv->do_get_berr_counter(dev, &bec) && 832 !priv->do_get_berr_counter(dev, &bec) &&
730 nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec))) 833 nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
834
835 (priv->data_bittiming.bitrate &&
836 nla_put(skb, IFLA_CAN_DATA_BITTIMING,
837 sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
838
839 (priv->data_bittiming_const &&
840 nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
841 sizeof(*priv->data_bittiming_const),
842 priv->data_bittiming_const)))
731 return -EMSGSIZE; 843 return -EMSGSIZE;
844
732 return 0; 845 return 0;
733} 846}
734 847
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 61376abdab39..f425ec2c7839 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1011,6 +1011,7 @@ static const struct net_device_ops flexcan_netdev_ops = {
1011 .ndo_open = flexcan_open, 1011 .ndo_open = flexcan_open,
1012 .ndo_stop = flexcan_close, 1012 .ndo_stop = flexcan_close,
1013 .ndo_start_xmit = flexcan_start_xmit, 1013 .ndo_start_xmit = flexcan_start_xmit,
1014 .ndo_change_mtu = can_change_mtu,
1014}; 1015};
1015 1016
1016static int register_flexcandev(struct net_device *dev) 1017static int register_flexcandev(struct net_device *dev)
@@ -1132,9 +1133,9 @@ static int flexcan_probe(struct platform_device *pdev)
1132 of_id = of_match_device(flexcan_of_match, &pdev->dev); 1133 of_id = of_match_device(flexcan_of_match, &pdev->dev);
1133 if (of_id) { 1134 if (of_id) {
1134 devtype_data = of_id->data; 1135 devtype_data = of_id->data;
1135 } else if (pdev->id_entry->driver_data) { 1136 } else if (platform_get_device_id(pdev)->driver_data) {
1136 devtype_data = (struct flexcan_devtype_data *) 1137 devtype_data = (struct flexcan_devtype_data *)
1137 pdev->id_entry->driver_data; 1138 platform_get_device_id(pdev)->driver_data;
1138 } else { 1139 } else {
1139 return -ENODEV; 1140 return -ENODEV;
1140 } 1141 }
@@ -1201,8 +1202,7 @@ static int flexcan_remove(struct platform_device *pdev)
1201 return 0; 1202 return 0;
1202} 1203}
1203 1204
1204#ifdef CONFIG_PM_SLEEP 1205static int __maybe_unused flexcan_suspend(struct device *device)
1205static int flexcan_suspend(struct device *device)
1206{ 1206{
1207 struct net_device *dev = dev_get_drvdata(device); 1207 struct net_device *dev = dev_get_drvdata(device);
1208 struct flexcan_priv *priv = netdev_priv(dev); 1208 struct flexcan_priv *priv = netdev_priv(dev);
@@ -1221,7 +1221,7 @@ static int flexcan_suspend(struct device *device)
1221 return 0; 1221 return 0;
1222} 1222}
1223 1223
1224static int flexcan_resume(struct device *device) 1224static int __maybe_unused flexcan_resume(struct device *device)
1225{ 1225{
1226 struct net_device *dev = dev_get_drvdata(device); 1226 struct net_device *dev = dev_get_drvdata(device);
1227 struct flexcan_priv *priv = netdev_priv(dev); 1227 struct flexcan_priv *priv = netdev_priv(dev);
@@ -1233,7 +1233,6 @@ static int flexcan_resume(struct device *device)
1233 } 1233 }
1234 return flexcan_chip_enable(priv); 1234 return flexcan_chip_enable(priv);
1235} 1235}
1236#endif /* CONFIG_PM_SLEEP */
1237 1236
1238static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); 1237static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
1239 1238
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index ab506d6cab37..3fd9fd942c6e 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1578,6 +1578,7 @@ static const struct net_device_ops grcan_netdev_ops = {
1578 .ndo_open = grcan_open, 1578 .ndo_open = grcan_open,
1579 .ndo_stop = grcan_close, 1579 .ndo_stop = grcan_close,
1580 .ndo_start_xmit = grcan_start_xmit, 1580 .ndo_start_xmit = grcan_start_xmit,
1581 .ndo_change_mtu = can_change_mtu,
1581}; 1582};
1582 1583
1583static int grcan_setup_netdev(struct platform_device *ofdev, 1584static int grcan_setup_netdev(struct platform_device *ofdev,
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 71594e5676fd..2382c04dc780 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -198,9 +198,6 @@ struct ican3_dev {
198 struct net_device *ndev; 198 struct net_device *ndev;
199 struct napi_struct napi; 199 struct napi_struct napi;
200 200
201 /* Device for printing */
202 struct device *dev;
203
204 /* module number */ 201 /* module number */
205 unsigned int num; 202 unsigned int num;
206 203
@@ -295,7 +292,7 @@ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
295 xord = locl ^ peer; 292 xord = locl ^ peer;
296 293
297 if ((xord & MSYNC_RB_MASK) == 0x00) { 294 if ((xord & MSYNC_RB_MASK) == 0x00) {
298 dev_dbg(mod->dev, "no mbox for reading\n"); 295 netdev_dbg(mod->ndev, "no mbox for reading\n");
299 return -ENOMEM; 296 return -ENOMEM;
300 } 297 }
301 298
@@ -340,7 +337,7 @@ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
340 xord = locl ^ peer; 337 xord = locl ^ peer;
341 338
342 if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) { 339 if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
343 dev_err(mod->dev, "no mbox for writing\n"); 340 netdev_err(mod->ndev, "no mbox for writing\n");
344 return -ENOMEM; 341 return -ENOMEM;
345 } 342 }
346 343
@@ -542,7 +539,7 @@ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
542 memcpy_fromio(&desc, desc_addr, sizeof(desc)); 539 memcpy_fromio(&desc, desc_addr, sizeof(desc));
543 540
544 if (!(desc.control & DESC_VALID)) { 541 if (!(desc.control & DESC_VALID)) {
545 dev_dbg(mod->dev, "%s: no free buffers\n", __func__); 542 netdev_dbg(mod->ndev, "%s: no free buffers\n", __func__);
546 return -ENOMEM; 543 return -ENOMEM;
547 } 544 }
548 545
@@ -573,7 +570,7 @@ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
573 memcpy_fromio(&desc, desc_addr, sizeof(desc)); 570 memcpy_fromio(&desc, desc_addr, sizeof(desc));
574 571
575 if (!(desc.control & DESC_VALID)) { 572 if (!(desc.control & DESC_VALID)) {
576 dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__); 573 netdev_dbg(mod->ndev, "%s: no buffers to recv\n", __func__);
577 return -ENOMEM; 574 return -ENOMEM;
578 } 575 }
579 576
@@ -883,7 +880,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
883 */ 880 */
884static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) 881static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
885{ 882{
886 dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data); 883 netdev_dbg(mod->ndev, "IDVERS response: %s\n", msg->data);
887} 884}
888 885
889static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) 886static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
@@ -899,7 +896,7 @@ static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
899 * error frame for userspace 896 * error frame for userspace
900 */ 897 */
901 if (msg->spec == MSG_MSGLOST) { 898 if (msg->spec == MSG_MSGLOST) {
902 dev_err(mod->dev, "lost %d control messages\n", msg->data[0]); 899 netdev_err(mod->ndev, "lost %d control messages\n", msg->data[0]);
903 return; 900 return;
904 } 901 }
905 902
@@ -939,13 +936,13 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
939 936
940 /* we can only handle the SJA1000 part */ 937 /* we can only handle the SJA1000 part */
941 if (msg->data[1] != CEVTIND_CHIP_SJA1000) { 938 if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
942 dev_err(mod->dev, "unable to handle errors on non-SJA1000\n"); 939 netdev_err(mod->ndev, "unable to handle errors on non-SJA1000\n");
943 return -ENODEV; 940 return -ENODEV;
944 } 941 }
945 942
946 /* check the message length for sanity */ 943 /* check the message length for sanity */
947 if (le16_to_cpu(msg->len) < 6) { 944 if (le16_to_cpu(msg->len) < 6) {
948 dev_err(mod->dev, "error message too short\n"); 945 netdev_err(mod->ndev, "error message too short\n");
949 return -EINVAL; 946 return -EINVAL;
950 } 947 }
951 948
@@ -967,7 +964,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
967 */ 964 */
968 if (isrc == CEVTIND_BEI) { 965 if (isrc == CEVTIND_BEI) {
969 int ret; 966 int ret;
970 dev_dbg(mod->dev, "bus error interrupt\n"); 967 netdev_dbg(mod->ndev, "bus error interrupt\n");
971 968
972 /* TX error */ 969 /* TX error */
973 if (!(ecc & ECC_DIR)) { 970 if (!(ecc & ECC_DIR)) {
@@ -983,7 +980,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
983 */ 980 */
984 ret = ican3_set_buserror(mod, 1); 981 ret = ican3_set_buserror(mod, 1);
985 if (ret) { 982 if (ret) {
986 dev_err(mod->dev, "unable to re-enable bus-error\n"); 983 netdev_err(mod->ndev, "unable to re-enable bus-error\n");
987 return ret; 984 return ret;
988 } 985 }
989 986
@@ -998,7 +995,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
998 995
999 /* data overrun interrupt */ 996 /* data overrun interrupt */
1000 if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) { 997 if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
1001 dev_dbg(mod->dev, "data overrun interrupt\n"); 998 netdev_dbg(mod->ndev, "data overrun interrupt\n");
1002 cf->can_id |= CAN_ERR_CRTL; 999 cf->can_id |= CAN_ERR_CRTL;
1003 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 1000 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
1004 stats->rx_over_errors++; 1001 stats->rx_over_errors++;
@@ -1007,7 +1004,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
1007 1004
1008 /* error warning + passive interrupt */ 1005 /* error warning + passive interrupt */
1009 if (isrc == CEVTIND_EI) { 1006 if (isrc == CEVTIND_EI) {
1010 dev_dbg(mod->dev, "error warning + passive interrupt\n"); 1007 netdev_dbg(mod->ndev, "error warning + passive interrupt\n");
1011 if (status & SR_BS) { 1008 if (status & SR_BS) {
1012 state = CAN_STATE_BUS_OFF; 1009 state = CAN_STATE_BUS_OFF;
1013 cf->can_id |= CAN_ERR_BUSOFF; 1010 cf->can_id |= CAN_ERR_BUSOFF;
@@ -1088,7 +1085,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
1088 complete(&mod->termination_comp); 1085 complete(&mod->termination_comp);
1089 break; 1086 break;
1090 default: 1087 default:
1091 dev_err(mod->dev, "received an unknown inquiry response\n"); 1088 netdev_err(mod->ndev, "received an unknown inquiry response\n");
1092 break; 1089 break;
1093 } 1090 }
1094} 1091}
@@ -1096,7 +1093,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
1096static void ican3_handle_unknown_message(struct ican3_dev *mod, 1093static void ican3_handle_unknown_message(struct ican3_dev *mod,
1097 struct ican3_msg *msg) 1094 struct ican3_msg *msg)
1098{ 1095{
1099 dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n", 1096 netdev_warn(mod->ndev, "received unknown message: spec 0x%.2x length %d\n",
1100 msg->spec, le16_to_cpu(msg->len)); 1097 msg->spec, le16_to_cpu(msg->len));
1101} 1098}
1102 1099
@@ -1105,7 +1102,7 @@ static void ican3_handle_unknown_message(struct ican3_dev *mod,
1105 */ 1102 */
1106static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) 1103static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
1107{ 1104{
1108 dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__, 1105 netdev_dbg(mod->ndev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
1109 mod->num, msg->spec, le16_to_cpu(msg->len)); 1106 mod->num, msg->spec, le16_to_cpu(msg->len));
1110 1107
1111 switch (msg->spec) { 1108 switch (msg->spec) {
@@ -1406,7 +1403,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
1406 msleep(10); 1403 msleep(10);
1407 } while (time_before(jiffies, start + HZ / 4)); 1404 } while (time_before(jiffies, start + HZ / 4));
1408 1405
1409 dev_err(mod->dev, "failed to reset CAN module\n"); 1406 netdev_err(mod->ndev, "failed to reset CAN module\n");
1410 return -ETIMEDOUT; 1407 return -ETIMEDOUT;
1411} 1408}
1412 1409
@@ -1425,7 +1422,7 @@ static int ican3_startup_module(struct ican3_dev *mod)
1425 1422
1426 ret = ican3_reset_module(mod); 1423 ret = ican3_reset_module(mod);
1427 if (ret) { 1424 if (ret) {
1428 dev_err(mod->dev, "unable to reset module\n"); 1425 netdev_err(mod->ndev, "unable to reset module\n");
1429 return ret; 1426 return ret;
1430 } 1427 }
1431 1428
@@ -1434,41 +1431,41 @@ static int ican3_startup_module(struct ican3_dev *mod)
1434 1431
1435 ret = ican3_msg_connect(mod); 1432 ret = ican3_msg_connect(mod);
1436 if (ret) { 1433 if (ret) {
1437 dev_err(mod->dev, "unable to connect to module\n"); 1434 netdev_err(mod->ndev, "unable to connect to module\n");
1438 return ret; 1435 return ret;
1439 } 1436 }
1440 1437
1441 ican3_init_new_host_interface(mod); 1438 ican3_init_new_host_interface(mod);
1442 ret = ican3_msg_newhostif(mod); 1439 ret = ican3_msg_newhostif(mod);
1443 if (ret) { 1440 if (ret) {
1444 dev_err(mod->dev, "unable to switch to new-style interface\n"); 1441 netdev_err(mod->ndev, "unable to switch to new-style interface\n");
1445 return ret; 1442 return ret;
1446 } 1443 }
1447 1444
1448 /* default to "termination on" */ 1445 /* default to "termination on" */
1449 ret = ican3_set_termination(mod, true); 1446 ret = ican3_set_termination(mod, true);
1450 if (ret) { 1447 if (ret) {
1451 dev_err(mod->dev, "unable to enable termination\n"); 1448 netdev_err(mod->ndev, "unable to enable termination\n");
1452 return ret; 1449 return ret;
1453 } 1450 }
1454 1451
1455 /* default to "bus errors enabled" */ 1452 /* default to "bus errors enabled" */
1456 ret = ican3_set_buserror(mod, 1); 1453 ret = ican3_set_buserror(mod, 1);
1457 if (ret) { 1454 if (ret) {
1458 dev_err(mod->dev, "unable to set bus-error\n"); 1455 netdev_err(mod->ndev, "unable to set bus-error\n");
1459 return ret; 1456 return ret;
1460 } 1457 }
1461 1458
1462 ican3_init_fast_host_interface(mod); 1459 ican3_init_fast_host_interface(mod);
1463 ret = ican3_msg_fasthostif(mod); 1460 ret = ican3_msg_fasthostif(mod);
1464 if (ret) { 1461 if (ret) {
1465 dev_err(mod->dev, "unable to switch to fast host interface\n"); 1462 netdev_err(mod->ndev, "unable to switch to fast host interface\n");
1466 return ret; 1463 return ret;
1467 } 1464 }
1468 1465
1469 ret = ican3_set_id_filter(mod, true); 1466 ret = ican3_set_id_filter(mod, true);
1470 if (ret) { 1467 if (ret) {
1471 dev_err(mod->dev, "unable to set acceptance filter\n"); 1468 netdev_err(mod->ndev, "unable to set acceptance filter\n");
1472 return ret; 1469 return ret;
1473 } 1470 }
1474 1471
@@ -1487,14 +1484,14 @@ static int ican3_open(struct net_device *ndev)
1487 /* open the CAN layer */ 1484 /* open the CAN layer */
1488 ret = open_candev(ndev); 1485 ret = open_candev(ndev);
1489 if (ret) { 1486 if (ret) {
1490 dev_err(mod->dev, "unable to start CAN layer\n"); 1487 netdev_err(mod->ndev, "unable to start CAN layer\n");
1491 return ret; 1488 return ret;
1492 } 1489 }
1493 1490
1494 /* bring the bus online */ 1491 /* bring the bus online */
1495 ret = ican3_set_bus_state(mod, true); 1492 ret = ican3_set_bus_state(mod, true);
1496 if (ret) { 1493 if (ret) {
1497 dev_err(mod->dev, "unable to set bus-on\n"); 1494 netdev_err(mod->ndev, "unable to set bus-on\n");
1498 close_candev(ndev); 1495 close_candev(ndev);
1499 return ret; 1496 return ret;
1500 } 1497 }
@@ -1518,7 +1515,7 @@ static int ican3_stop(struct net_device *ndev)
1518 /* bring the bus offline, stop receiving packets */ 1515 /* bring the bus offline, stop receiving packets */
1519 ret = ican3_set_bus_state(mod, false); 1516 ret = ican3_set_bus_state(mod, false);
1520 if (ret) { 1517 if (ret) {
1521 dev_err(mod->dev, "unable to set bus-off\n"); 1518 netdev_err(mod->ndev, "unable to set bus-off\n");
1522 return ret; 1519 return ret;
1523 } 1520 }
1524 1521
@@ -1545,7 +1542,7 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
1545 1542
1546 /* check that we can actually transmit */ 1543 /* check that we can actually transmit */
1547 if (!ican3_txok(mod)) { 1544 if (!ican3_txok(mod)) {
1548 dev_err(mod->dev, "BUG: no free descriptors\n"); 1545 netdev_err(mod->ndev, "BUG: no free descriptors\n");
1549 spin_unlock_irqrestore(&mod->lock, flags); 1546 spin_unlock_irqrestore(&mod->lock, flags);
1550 return NETDEV_TX_BUSY; 1547 return NETDEV_TX_BUSY;
1551 } 1548 }
@@ -1597,6 +1594,7 @@ static const struct net_device_ops ican3_netdev_ops = {
1597 .ndo_open = ican3_open, 1594 .ndo_open = ican3_open,
1598 .ndo_stop = ican3_stop, 1595 .ndo_stop = ican3_stop,
1599 .ndo_start_xmit = ican3_xmit, 1596 .ndo_start_xmit = ican3_xmit,
1597 .ndo_change_mtu = can_change_mtu,
1600}; 1598};
1601 1599
1602/* 1600/*
@@ -1657,7 +1655,7 @@ static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
1657 /* bring the bus online */ 1655 /* bring the bus online */
1658 ret = ican3_set_bus_state(mod, true); 1656 ret = ican3_set_bus_state(mod, true);
1659 if (ret) { 1657 if (ret) {
1660 dev_err(mod->dev, "unable to set bus-on\n"); 1658 netdev_err(ndev, "unable to set bus-on\n");
1661 return ret; 1659 return ret;
1662 } 1660 }
1663 1661
@@ -1682,7 +1680,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
1682 1680
1683 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); 1681 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
1684 if (ret == 0) { 1682 if (ret == 0) {
1685 dev_info(mod->dev, "%s timed out\n", __func__); 1683 netdev_info(mod->ndev, "%s timed out\n", __func__);
1686 return -ETIMEDOUT; 1684 return -ETIMEDOUT;
1687 } 1685 }
1688 1686
@@ -1708,7 +1706,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
1708 1706
1709 ret = wait_for_completion_timeout(&mod->termination_comp, HZ); 1707 ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
1710 if (ret == 0) { 1708 if (ret == 0) {
1711 dev_info(mod->dev, "%s timed out\n", __func__); 1709 netdev_info(mod->ndev, "%s timed out\n", __func__);
1712 return -ETIMEDOUT; 1710 return -ETIMEDOUT;
1713 } 1711 }
1714 1712
@@ -1778,7 +1776,6 @@ static int ican3_probe(struct platform_device *pdev)
1778 platform_set_drvdata(pdev, ndev); 1776 platform_set_drvdata(pdev, ndev);
1779 mod = netdev_priv(ndev); 1777 mod = netdev_priv(ndev);
1780 mod->ndev = ndev; 1778 mod->ndev = ndev;
1781 mod->dev = &pdev->dev;
1782 mod->num = pdata->modno; 1779 mod->num = pdata->modno;
1783 netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); 1780 netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
1784 skb_queue_head_init(&mod->echoq); 1781 skb_queue_head_init(&mod->echoq);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index cdb9808d12db..28c11f815245 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -601,10 +601,10 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
601 (bt->prop_seg - 1)); 601 (bt->prop_seg - 1));
602 mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK, 602 mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
603 (bt->phase_seg2 - 1)); 603 (bt->phase_seg2 - 1));
604 dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n", 604 dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
605 mcp251x_read_reg(spi, CNF1), 605 mcp251x_read_reg(spi, CNF1),
606 mcp251x_read_reg(spi, CNF2), 606 mcp251x_read_reg(spi, CNF2),
607 mcp251x_read_reg(spi, CNF3)); 607 mcp251x_read_reg(spi, CNF3));
608 608
609 return 0; 609 return 0;
610} 610}
@@ -672,7 +672,7 @@ static int mcp251x_hw_probe(struct spi_device *spi)
672 672
673static int mcp251x_power_enable(struct regulator *reg, int enable) 673static int mcp251x_power_enable(struct regulator *reg, int enable)
674{ 674{
675 if (IS_ERR(reg)) 675 if (IS_ERR_OR_NULL(reg))
676 return 0; 676 return 0;
677 677
678 if (enable) 678 if (enable)
@@ -996,6 +996,7 @@ static const struct net_device_ops mcp251x_netdev_ops = {
996 .ndo_open = mcp251x_open, 996 .ndo_open = mcp251x_open,
997 .ndo_stop = mcp251x_stop, 997 .ndo_stop = mcp251x_stop,
998 .ndo_start_xmit = mcp251x_hard_start_xmit, 998 .ndo_start_xmit = mcp251x_hard_start_xmit,
999 .ndo_change_mtu = can_change_mtu,
999}; 1000};
1000 1001
1001static const struct of_device_id mcp251x_of_match[] = { 1002static const struct of_device_id mcp251x_of_match[] = {
@@ -1155,8 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
1155 1156
1156 devm_can_led_init(net); 1157 devm_can_led_init(net);
1157 1158
1158 dev_info(&spi->dev, "probed\n");
1159
1160 return ret; 1159 return ret;
1161 1160
1162error_probe: 1161error_probe:
@@ -1197,9 +1196,7 @@ static int mcp251x_can_remove(struct spi_device *spi)
1197 return 0; 1196 return 0;
1198} 1197}
1199 1198
1200#ifdef CONFIG_PM_SLEEP 1199static int __maybe_unused mcp251x_can_suspend(struct device *dev)
1201
1202static int mcp251x_can_suspend(struct device *dev)
1203{ 1200{
1204 struct spi_device *spi = to_spi_device(dev); 1201 struct spi_device *spi = to_spi_device(dev);
1205 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1202 struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1221,7 +1218,7 @@ static int mcp251x_can_suspend(struct device *dev)
1221 priv->after_suspend = AFTER_SUSPEND_DOWN; 1218 priv->after_suspend = AFTER_SUSPEND_DOWN;
1222 } 1219 }
1223 1220
1224 if (!IS_ERR(priv->power)) { 1221 if (!IS_ERR_OR_NULL(priv->power)) {
1225 regulator_disable(priv->power); 1222 regulator_disable(priv->power);
1226 priv->after_suspend |= AFTER_SUSPEND_POWER; 1223 priv->after_suspend |= AFTER_SUSPEND_POWER;
1227 } 1224 }
@@ -1229,7 +1226,7 @@ static int mcp251x_can_suspend(struct device *dev)
1229 return 0; 1226 return 0;
1230} 1227}
1231 1228
1232static int mcp251x_can_resume(struct device *dev) 1229static int __maybe_unused mcp251x_can_resume(struct device *dev)
1233{ 1230{
1234 struct spi_device *spi = to_spi_device(dev); 1231 struct spi_device *spi = to_spi_device(dev);
1235 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1232 struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1249,7 +1246,6 @@ static int mcp251x_can_resume(struct device *dev)
1249 enable_irq(spi->irq); 1246 enable_irq(spi->irq);
1250 return 0; 1247 return 0;
1251} 1248}
1252#endif
1253 1249
1254static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend, 1250static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
1255 mcp251x_can_resume); 1251 mcp251x_can_resume);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index b9f3faabb0f3..e0c9be5e2ab7 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -647,9 +647,10 @@ static int mscan_close(struct net_device *dev)
647} 647}
648 648
649static const struct net_device_ops mscan_netdev_ops = { 649static const struct net_device_ops mscan_netdev_ops = {
650 .ndo_open = mscan_open, 650 .ndo_open = mscan_open,
651 .ndo_stop = mscan_close, 651 .ndo_stop = mscan_close,
652 .ndo_start_xmit = mscan_start_xmit, 652 .ndo_start_xmit = mscan_start_xmit,
653 .ndo_change_mtu = can_change_mtu,
653}; 654};
654 655
655int register_mscandev(struct net_device *dev, int mscan_clksrc) 656int register_mscandev(struct net_device *dev, int mscan_clksrc)
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 6c077eb87b5e..6472562efedc 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -950,6 +950,7 @@ static const struct net_device_ops pch_can_netdev_ops = {
950 .ndo_open = pch_can_open, 950 .ndo_open = pch_can_open,
951 .ndo_stop = pch_close, 951 .ndo_stop = pch_close,
952 .ndo_start_xmit = pch_xmit, 952 .ndo_start_xmit = pch_xmit,
953 .ndo_change_mtu = can_change_mtu,
953}; 954};
954 955
955static void pch_can_remove(struct pci_dev *pdev) 956static void pch_can_remove(struct pci_dev *pdev)
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ff2ba86cd4a4..4b18b8765523 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -17,16 +17,9 @@ config CAN_SJA1000_PLATFORM
17 the "platform bus" (Linux abstraction for directly to the 17 the "platform bus" (Linux abstraction for directly to the
18 processor attached devices). Which can be found on various 18 processor attached devices). Which can be found on various
19 boards from Phytec (http://www.phytec.de) like the PCM027, 19 boards from Phytec (http://www.phytec.de) like the PCM027,
20 PCM038. 20 PCM038. It also provides the OpenFirmware "platform bus" found
21 21 on embedded systems with OpenFirmware bindings, e.g. if you
22config CAN_SJA1000_OF_PLATFORM 22 have a PowerPC based system you may want to enable this option.
23 tristate "Generic OF Platform Bus based SJA1000 driver"
24 depends on OF
25 ---help---
26 This driver adds support for the SJA1000 chips connected to
27 the OpenFirmware "platform bus" found on embedded systems with
28 OpenFirmware bindings, e.g. if you have a PowerPC based system
29 you may want to enable this option.
30 23
31config CAN_EMS_PCMCIA 24config CAN_EMS_PCMCIA
32 tristate "EMS CPC-CARD Card" 25 tristate "EMS CPC-CARD Card"
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index b3d05cbfec36..531d5fcc97e5 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -5,7 +5,6 @@
5obj-$(CONFIG_CAN_SJA1000) += sja1000.o 5obj-$(CONFIG_CAN_SJA1000) += sja1000.o
6obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o 6obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
7obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o 7obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
9obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o 8obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
10obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o 9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
11obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o 10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index d790b874ca79..fd13dbf07d9c 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -323,6 +323,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
323 priv->cdr = EMS_PCI_CDR; 323 priv->cdr = EMS_PCI_CDR;
324 324
325 SET_NETDEV_DEV(dev, &pdev->dev); 325 SET_NETDEV_DEV(dev, &pdev->dev);
326 dev->dev_id = i;
326 327
327 if (card->version == 1) 328 if (card->version == 1)
328 /* reset int flag of pita */ 329 /* reset int flag of pita */
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index 9e535f2ef52b..381de998d2f1 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -211,6 +211,7 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
211 priv = netdev_priv(dev); 211 priv = netdev_priv(dev);
212 priv->priv = card; 212 priv->priv = card;
213 SET_NETDEV_DEV(dev, &pdev->dev); 213 SET_NETDEV_DEV(dev, &pdev->dev);
214 dev->dev_id = i;
214 215
215 priv->irq_flags = IRQF_SHARED; 216 priv->irq_flags = IRQF_SHARED;
216 dev->irq = pdev->irq; 217 dev->irq = pdev->irq;
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index c96eb14699d5..23b8e1324e25 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -270,6 +270,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
270 priv->reg_base, board->conf_addr, dev->irq); 270 priv->reg_base, board->conf_addr, dev->irq);
271 271
272 SET_NETDEV_DEV(dev, &pdev->dev); 272 SET_NETDEV_DEV(dev, &pdev->dev);
273 dev->dev_id = channel;
273 274
274 /* Register SJA1000 device */ 275 /* Register SJA1000 device */
275 err = register_sja1000dev(dev); 276 err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 065ca49eb45e..c540e3d12e3d 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -642,6 +642,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
642 icr |= chan->icr_mask; 642 icr |= chan->icr_mask;
643 643
644 SET_NETDEV_DEV(dev, &pdev->dev); 644 SET_NETDEV_DEV(dev, &pdev->dev);
645 dev->dev_id = i;
645 646
646 /* Create chain of SJA1000 devices */ 647 /* Create chain of SJA1000 devices */
647 chan->prev_dev = pci_get_drvdata(pdev); 648 chan->prev_dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index f7ad754dd2aa..dd56133cc461 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -550,6 +550,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
550 priv = netdev_priv(netdev); 550 priv = netdev_priv(netdev);
551 priv->priv = card; 551 priv->priv = card;
552 SET_NETDEV_DEV(netdev, &pdev->dev); 552 SET_NETDEV_DEV(netdev, &pdev->dev);
553 netdev->dev_id = i;
553 554
554 priv->irq_flags = IRQF_SHARED; 555 priv->irq_flags = IRQF_SHARED;
555 netdev->irq = pdev->irq; 556 netdev->irq = pdev->irq;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index fbb61a0d901f..ec39b7cb2287 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -587,6 +587,7 @@ static int plx_pci_add_card(struct pci_dev *pdev,
587 priv->cdr = ci->cdr; 587 priv->cdr = ci->cdr;
588 588
589 SET_NETDEV_DEV(dev, &pdev->dev); 589 SET_NETDEV_DEV(dev, &pdev->dev);
590 dev->dev_id = i;
590 591
591 /* Register SJA1000 device */ 592 /* Register SJA1000 device */
592 err = register_sja1000dev(dev); 593 err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f17c3018b7c7..f31499a32d7d 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -106,8 +106,7 @@ static int sja1000_probe_chip(struct net_device *dev)
106 struct sja1000_priv *priv = netdev_priv(dev); 106 struct sja1000_priv *priv = netdev_priv(dev);
107 107
108 if (priv->reg_base && sja1000_is_absent(priv)) { 108 if (priv->reg_base && sja1000_is_absent(priv)) {
109 printk(KERN_INFO "%s: probing @0x%lX failed\n", 109 netdev_err(dev, "probing failed\n");
110 DRV_NAME, dev->base_addr);
111 return 0; 110 return 0;
112 } 111 }
113 return -1; 112 return -1;
@@ -643,9 +642,10 @@ void free_sja1000dev(struct net_device *dev)
643EXPORT_SYMBOL_GPL(free_sja1000dev); 642EXPORT_SYMBOL_GPL(free_sja1000dev);
644 643
645static const struct net_device_ops sja1000_netdev_ops = { 644static const struct net_device_ops sja1000_netdev_ops = {
646 .ndo_open = sja1000_open, 645 .ndo_open = sja1000_open,
647 .ndo_stop = sja1000_close, 646 .ndo_stop = sja1000_close,
648 .ndo_start_xmit = sja1000_start_xmit, 647 .ndo_start_xmit = sja1000_start_xmit,
648 .ndo_change_mtu = can_change_mtu,
649}; 649};
650 650
651int register_sja1000dev(struct net_device *dev) 651int register_sja1000dev(struct net_device *dev)
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
deleted file mode 100644
index 2f6e24534231..000000000000
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ /dev/null
@@ -1,220 +0,0 @@
1/*
2 * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
3 *
4 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
20 * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
21 * definition in your flattened device tree source (DTS) file similar to:
22 *
23 * can@3,100 {
24 * compatible = "nxp,sja1000";
25 * reg = <3 0x100 0x80>;
26 * interrupts = <2 0>;
27 * interrupt-parent = <&mpic>;
28 * nxp,external-clock-frequency = <16000000>;
29 * };
30 *
31 * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
32 * information.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/interrupt.h>
38#include <linux/netdevice.h>
39#include <linux/delay.h>
40#include <linux/io.h>
41#include <linux/can/dev.h>
42
43#include <linux/of_platform.h>
44#include <linux/of_address.h>
45#include <linux/of_irq.h>
46
47#include "sja1000.h"
48
49#define DRV_NAME "sja1000_of_platform"
50
51MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
52MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
53MODULE_LICENSE("GPL v2");
54
55#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
56
57#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
58#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
59
60static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
61{
62 return ioread8(priv->reg_base + reg);
63}
64
65static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
66 int reg, u8 val)
67{
68 iowrite8(val, priv->reg_base + reg);
69}
70
71static int sja1000_ofp_remove(struct platform_device *ofdev)
72{
73 struct net_device *dev = platform_get_drvdata(ofdev);
74 struct sja1000_priv *priv = netdev_priv(dev);
75 struct device_node *np = ofdev->dev.of_node;
76 struct resource res;
77
78 unregister_sja1000dev(dev);
79 free_sja1000dev(dev);
80 iounmap(priv->reg_base);
81 irq_dispose_mapping(dev->irq);
82
83 of_address_to_resource(np, 0, &res);
84 release_mem_region(res.start, resource_size(&res));
85
86 return 0;
87}
88
89static int sja1000_ofp_probe(struct platform_device *ofdev)
90{
91 struct device_node *np = ofdev->dev.of_node;
92 struct net_device *dev;
93 struct sja1000_priv *priv;
94 struct resource res;
95 u32 prop;
96 int err, irq, res_size;
97 void __iomem *base;
98
99 err = of_address_to_resource(np, 0, &res);
100 if (err) {
101 dev_err(&ofdev->dev, "invalid address\n");
102 return err;
103 }
104
105 res_size = resource_size(&res);
106
107 if (!request_mem_region(res.start, res_size, DRV_NAME)) {
108 dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
109 return -EBUSY;
110 }
111
112 base = ioremap_nocache(res.start, res_size);
113 if (!base) {
114 dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
115 err = -ENOMEM;
116 goto exit_release_mem;
117 }
118
119 irq = irq_of_parse_and_map(np, 0);
120 if (irq == 0) {
121 dev_err(&ofdev->dev, "no irq found\n");
122 err = -ENODEV;
123 goto exit_unmap_mem;
124 }
125
126 dev = alloc_sja1000dev(0);
127 if (!dev) {
128 err = -ENOMEM;
129 goto exit_dispose_irq;
130 }
131
132 priv = netdev_priv(dev);
133
134 priv->read_reg = sja1000_ofp_read_reg;
135 priv->write_reg = sja1000_ofp_write_reg;
136
137 err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
138 if (!err)
139 priv->can.clock.freq = prop / 2;
140 else
141 priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
142
143 err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
144 if (!err)
145 priv->ocr |= prop & OCR_MODE_MASK;
146 else
147 priv->ocr |= OCR_MODE_NORMAL; /* default */
148
149 err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
150 if (!err)
151 priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
152 else
153 priv->ocr |= OCR_TX0_PULLDOWN; /* default */
154
155 err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
156 if (!err && prop) {
157 u32 divider = priv->can.clock.freq * 2 / prop;
158
159 if (divider > 1)
160 priv->cdr |= divider / 2 - 1;
161 else
162 priv->cdr |= CDR_CLKOUT_MASK;
163 } else {
164 priv->cdr |= CDR_CLK_OFF; /* default */
165 }
166
167 if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
168 priv->cdr |= CDR_CBP; /* default */
169
170 priv->irq_flags = IRQF_SHARED;
171 priv->reg_base = base;
172
173 dev->irq = irq;
174
175 dev_info(&ofdev->dev,
176 "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
177 priv->reg_base, dev->irq, priv->can.clock.freq,
178 priv->ocr, priv->cdr);
179
180 platform_set_drvdata(ofdev, dev);
181 SET_NETDEV_DEV(dev, &ofdev->dev);
182
183 err = register_sja1000dev(dev);
184 if (err) {
185 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
186 DRV_NAME, err);
187 goto exit_free_sja1000;
188 }
189
190 return 0;
191
192exit_free_sja1000:
193 free_sja1000dev(dev);
194exit_dispose_irq:
195 irq_dispose_mapping(irq);
196exit_unmap_mem:
197 iounmap(base);
198exit_release_mem:
199 release_mem_region(res.start, res_size);
200
201 return err;
202}
203
204static struct of_device_id sja1000_ofp_table[] = {
205 {.compatible = "nxp,sja1000"},
206 {},
207};
208MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
209
210static struct platform_driver sja1000_ofp_driver = {
211 .driver = {
212 .owner = THIS_MODULE,
213 .name = DRV_NAME,
214 .of_match_table = sja1000_ofp_table,
215 },
216 .probe = sja1000_ofp_probe,
217 .remove = sja1000_ofp_remove,
218};
219
220module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 943df645b459..95a844a7ee7b 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -26,12 +26,16 @@
26#include <linux/can/dev.h> 26#include <linux/can/dev.h>
27#include <linux/can/platform/sja1000.h> 27#include <linux/can/platform/sja1000.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/of.h>
30#include <linux/of_irq.h>
29 31
30#include "sja1000.h" 32#include "sja1000.h"
31 33
32#define DRV_NAME "sja1000_platform" 34#define DRV_NAME "sja1000_platform"
35#define SP_CAN_CLOCK (16000000 / 2)
33 36
34MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 37MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
38MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
35MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); 39MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
36MODULE_ALIAS("platform:" DRV_NAME); 40MODULE_ALIAS("platform:" DRV_NAME);
37MODULE_LICENSE("GPL v2"); 41MODULE_LICENSE("GPL v2");
@@ -66,59 +70,16 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
66 iowrite8(val, priv->reg_base + reg * 4); 70 iowrite8(val, priv->reg_base + reg * 4);
67} 71}
68 72
69static int sp_probe(struct platform_device *pdev) 73static void sp_populate(struct sja1000_priv *priv,
74 struct sja1000_platform_data *pdata,
75 unsigned long resource_mem_flags)
70{ 76{
71 int err;
72 void __iomem *addr;
73 struct net_device *dev;
74 struct sja1000_priv *priv;
75 struct resource *res_mem, *res_irq;
76 struct sja1000_platform_data *pdata;
77
78 pdata = dev_get_platdata(&pdev->dev);
79 if (!pdata) {
80 dev_err(&pdev->dev, "No platform data provided!\n");
81 err = -ENODEV;
82 goto exit;
83 }
84
85 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
86 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
87 if (!res_mem || !res_irq) {
88 err = -ENODEV;
89 goto exit;
90 }
91
92 if (!request_mem_region(res_mem->start, resource_size(res_mem),
93 DRV_NAME)) {
94 err = -EBUSY;
95 goto exit;
96 }
97
98 addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
99 if (!addr) {
100 err = -ENOMEM;
101 goto exit_release;
102 }
103
104 dev = alloc_sja1000dev(0);
105 if (!dev) {
106 err = -ENOMEM;
107 goto exit_iounmap;
108 }
109 priv = netdev_priv(dev);
110
111 dev->irq = res_irq->start;
112 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
113 if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
114 priv->irq_flags |= IRQF_SHARED;
115 priv->reg_base = addr;
116 /* The CAN clock frequency is half the oscillator clock frequency */ 77 /* The CAN clock frequency is half the oscillator clock frequency */
117 priv->can.clock.freq = pdata->osc_freq / 2; 78 priv->can.clock.freq = pdata->osc_freq / 2;
118 priv->ocr = pdata->ocr; 79 priv->ocr = pdata->ocr;
119 priv->cdr = pdata->cdr; 80 priv->cdr = pdata->cdr;
120 81
121 switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) { 82 switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) {
122 case IORESOURCE_MEM_32BIT: 83 case IORESOURCE_MEM_32BIT:
123 priv->read_reg = sp_read_reg32; 84 priv->read_reg = sp_read_reg32;
124 priv->write_reg = sp_write_reg32; 85 priv->write_reg = sp_write_reg32;
@@ -133,6 +94,124 @@ static int sp_probe(struct platform_device *pdev)
133 priv->write_reg = sp_write_reg8; 94 priv->write_reg = sp_write_reg8;
134 break; 95 break;
135 } 96 }
97}
98
99static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
100{
101 int err;
102 u32 prop;
103
104 err = of_property_read_u32(of, "reg-io-width", &prop);
105 if (err)
106 prop = 1; /* 8 bit is default */
107
108 switch (prop) {
109 case 4:
110 priv->read_reg = sp_read_reg32;
111 priv->write_reg = sp_write_reg32;
112 break;
113 case 2:
114 priv->read_reg = sp_read_reg16;
115 priv->write_reg = sp_write_reg16;
116 break;
117 case 1: /* fallthrough */
118 default:
119 priv->read_reg = sp_read_reg8;
120 priv->write_reg = sp_write_reg8;
121 }
122
123 err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
124 if (!err)
125 priv->can.clock.freq = prop / 2;
126 else
127 priv->can.clock.freq = SP_CAN_CLOCK; /* default */
128
129 err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
130 if (!err)
131 priv->ocr |= prop & OCR_MODE_MASK;
132 else
133 priv->ocr |= OCR_MODE_NORMAL; /* default */
134
135 err = of_property_read_u32(of, "nxp,tx-output-config", &prop);
136 if (!err)
137 priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
138 else
139 priv->ocr |= OCR_TX0_PULLDOWN; /* default */
140
141 err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop);
142 if (!err && prop) {
143 u32 divider = priv->can.clock.freq * 2 / prop;
144
145 if (divider > 1)
146 priv->cdr |= divider / 2 - 1;
147 else
148 priv->cdr |= CDR_CLKOUT_MASK;
149 } else {
150 priv->cdr |= CDR_CLK_OFF; /* default */
151 }
152
153 if (!of_property_read_bool(of, "nxp,no-comparator-bypass"))
154 priv->cdr |= CDR_CBP; /* default */
155}
156
157static int sp_probe(struct platform_device *pdev)
158{
159 int err, irq = 0;
160 void __iomem *addr;
161 struct net_device *dev;
162 struct sja1000_priv *priv;
163 struct resource *res_mem, *res_irq = NULL;
164 struct sja1000_platform_data *pdata;
165 struct device_node *of = pdev->dev.of_node;
166
167 pdata = dev_get_platdata(&pdev->dev);
168 if (!pdata && !of) {
169 dev_err(&pdev->dev, "No platform data provided!\n");
170 return -ENODEV;
171 }
172
173 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 if (!res_mem)
175 return -ENODEV;
176
177 if (!devm_request_mem_region(&pdev->dev, res_mem->start,
178 resource_size(res_mem), DRV_NAME))
179 return -EBUSY;
180
181 addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
182 resource_size(res_mem));
183 if (!addr)
184 return -ENOMEM;
185
186 if (of)
187 irq = irq_of_parse_and_map(of, 0);
188 else
189 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
190
191 if (!irq && !res_irq)
192 return -ENODEV;
193
194 dev = alloc_sja1000dev(0);
195 if (!dev)
196 return -ENOMEM;
197 priv = netdev_priv(dev);
198
199 if (res_irq) {
200 irq = res_irq->start;
201 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
202 if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
203 priv->irq_flags |= IRQF_SHARED;
204 } else {
205 priv->irq_flags = IRQF_SHARED;
206 }
207
208 dev->irq = irq;
209 priv->reg_base = addr;
210
211 if (of)
212 sp_populate_of(priv, of);
213 else
214 sp_populate(priv, pdata, res_mem->flags);
136 215
137 platform_set_drvdata(pdev, dev); 216 platform_set_drvdata(pdev, dev);
138 SET_NETDEV_DEV(dev, &pdev->dev); 217 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -150,39 +229,32 @@ static int sp_probe(struct platform_device *pdev)
150 229
151 exit_free: 230 exit_free:
152 free_sja1000dev(dev); 231 free_sja1000dev(dev);
153 exit_iounmap:
154 iounmap(addr);
155 exit_release:
156 release_mem_region(res_mem->start, resource_size(res_mem));
157 exit:
158 return err; 232 return err;
159} 233}
160 234
161static int sp_remove(struct platform_device *pdev) 235static int sp_remove(struct platform_device *pdev)
162{ 236{
163 struct net_device *dev = platform_get_drvdata(pdev); 237 struct net_device *dev = platform_get_drvdata(pdev);
164 struct sja1000_priv *priv = netdev_priv(dev);
165 struct resource *res;
166 238
167 unregister_sja1000dev(dev); 239 unregister_sja1000dev(dev);
168
169 if (priv->reg_base)
170 iounmap(priv->reg_base);
171
172 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
173 release_mem_region(res->start, resource_size(res));
174
175 free_sja1000dev(dev); 240 free_sja1000dev(dev);
176 241
177 return 0; 242 return 0;
178} 243}
179 244
245static struct of_device_id sp_of_table[] = {
246 {.compatible = "nxp,sja1000"},
247 {},
248};
249MODULE_DEVICE_TABLE(of, sp_of_table);
250
180static struct platform_driver sp_driver = { 251static struct platform_driver sp_driver = {
181 .probe = sp_probe, 252 .probe = sp_probe,
182 .remove = sp_remove, 253 .remove = sp_remove,
183 .driver = { 254 .driver = {
184 .name = DRV_NAME, 255 .name = DRV_NAME,
185 .owner = THIS_MODULE, 256 .owner = THIS_MODULE,
257 .of_match_table = sp_of_table,
186 }, 258 },
187}; 259};
188 260
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 3fcdae266377..f5b16e0e3a12 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -411,10 +411,16 @@ static void slc_free_netdev(struct net_device *dev)
411 slcan_devs[i] = NULL; 411 slcan_devs[i] = NULL;
412} 412}
413 413
414static int slcan_change_mtu(struct net_device *dev, int new_mtu)
415{
416 return -EINVAL;
417}
418
414static const struct net_device_ops slc_netdev_ops = { 419static const struct net_device_ops slc_netdev_ops = {
415 .ndo_open = slc_open, 420 .ndo_open = slc_open,
416 .ndo_stop = slc_close, 421 .ndo_stop = slc_close,
417 .ndo_start_xmit = slc_xmit, 422 .ndo_start_xmit = slc_xmit,
423 .ndo_change_mtu = slcan_change_mtu,
418}; 424};
419 425
420static void slc_setup(struct net_device *dev) 426static void slc_setup(struct net_device *dev)
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 9ea0dcde94ce..7d8c8f3672dd 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -628,6 +628,7 @@ static const struct net_device_ops softing_netdev_ops = {
628 .ndo_open = softing_netdev_open, 628 .ndo_open = softing_netdev_open,
629 .ndo_stop = softing_netdev_stop, 629 .ndo_stop = softing_netdev_stop,
630 .ndo_start_xmit = softing_netdev_start_xmit, 630 .ndo_start_xmit = softing_netdev_start_xmit,
631 .ndo_change_mtu = can_change_mtu,
631}; 632};
632 633
633static const struct can_bittiming_const softing_btr_const = { 634static const struct can_bittiming_const softing_btr_const = {
@@ -832,6 +833,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
832 ret = -ENOMEM; 833 ret = -ENOMEM;
833 goto netdev_failed; 834 goto netdev_failed;
834 } 835 }
836 netdev->dev_id = j;
835 priv = netdev_priv(card->net[j]); 837 priv = netdev_priv(card->net[j]);
836 priv->index = j; 838 priv->index = j;
837 ret = softing_netdev_register(netdev); 839 ret = softing_netdev_register(netdev);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2c62fe6c8fa9..258b9c4856ec 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -871,6 +871,7 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
871 .ndo_open = ti_hecc_open, 871 .ndo_open = ti_hecc_open,
872 .ndo_stop = ti_hecc_close, 872 .ndo_stop = ti_hecc_close,
873 .ndo_start_xmit = ti_hecc_xmit, 873 .ndo_start_xmit = ti_hecc_xmit,
874 .ndo_change_mtu = can_change_mtu,
874}; 875};
875 876
876static int ti_hecc_probe(struct platform_device *pdev) 877static int ti_hecc_probe(struct platform_device *pdev)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 52c42fd49510..00f2534dde73 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -883,6 +883,7 @@ static const struct net_device_ops ems_usb_netdev_ops = {
883 .ndo_open = ems_usb_open, 883 .ndo_open = ems_usb_open,
884 .ndo_stop = ems_usb_close, 884 .ndo_stop = ems_usb_close,
885 .ndo_start_xmit = ems_usb_start_xmit, 885 .ndo_start_xmit = ems_usb_start_xmit,
886 .ndo_change_mtu = can_change_mtu,
886}; 887};
887 888
888static const struct can_bittiming_const ems_usb_bittiming_const = { 889static const struct can_bittiming_const ems_usb_bittiming_const = {
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 7fbe85935f1d..b7c9e8b11460 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -888,6 +888,7 @@ static const struct net_device_ops esd_usb2_netdev_ops = {
888 .ndo_open = esd_usb2_open, 888 .ndo_open = esd_usb2_open,
889 .ndo_stop = esd_usb2_close, 889 .ndo_stop = esd_usb2_close,
890 .ndo_start_xmit = esd_usb2_start_xmit, 890 .ndo_start_xmit = esd_usb2_start_xmit,
891 .ndo_change_mtu = can_change_mtu,
891}; 892};
892 893
893static const struct can_bittiming_const esd_usb2_bittiming_const = { 894static const struct can_bittiming_const esd_usb2_bittiming_const = {
@@ -1024,6 +1025,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
1024 netdev->netdev_ops = &esd_usb2_netdev_ops; 1025 netdev->netdev_ops = &esd_usb2_netdev_ops;
1025 1026
1026 SET_NETDEV_DEV(netdev, &intf->dev); 1027 SET_NETDEV_DEV(netdev, &intf->dev);
1028 netdev->dev_id = index;
1027 1029
1028 err = register_candev(netdev); 1030 err = register_candev(netdev);
1029 if (err) { 1031 if (err) {
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index e77d11049747..4ca46edc061d 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1388,6 +1388,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
1388 .ndo_open = kvaser_usb_open, 1388 .ndo_open = kvaser_usb_open,
1389 .ndo_stop = kvaser_usb_close, 1389 .ndo_stop = kvaser_usb_close,
1390 .ndo_start_xmit = kvaser_usb_start_xmit, 1390 .ndo_start_xmit = kvaser_usb_start_xmit,
1391 .ndo_change_mtu = can_change_mtu,
1391}; 1392};
1392 1393
1393static const struct can_bittiming_const kvaser_usb_bittiming_const = { 1394static const struct can_bittiming_const kvaser_usb_bittiming_const = {
@@ -1529,6 +1530,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1529 netdev->netdev_ops = &kvaser_usb_netdev_ops; 1530 netdev->netdev_ops = &kvaser_usb_netdev_ops;
1530 1531
1531 SET_NETDEV_DEV(netdev, &intf->dev); 1532 SET_NETDEV_DEV(netdev, &intf->dev);
1533 netdev->dev_id = channel;
1532 1534
1533 dev->nets[channel] = priv; 1535 dev->nets[channel] = priv;
1534 1536
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b7a4c3b01a2..644e6ab8a489 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -702,6 +702,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
702 .ndo_open = peak_usb_ndo_open, 702 .ndo_open = peak_usb_ndo_open,
703 .ndo_stop = peak_usb_ndo_stop, 703 .ndo_stop = peak_usb_ndo_stop,
704 .ndo_start_xmit = peak_usb_ndo_start_xmit, 704 .ndo_start_xmit = peak_usb_ndo_start_xmit,
705 .ndo_change_mtu = can_change_mtu,
705}; 706};
706 707
707/* 708/*
@@ -769,6 +770,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
769 usb_set_intfdata(intf, dev); 770 usb_set_intfdata(intf, dev);
770 771
771 SET_NETDEV_DEV(netdev, &intf->dev); 772 SET_NETDEV_DEV(netdev, &intf->dev);
773 netdev->dev_id = ctrl_idx;
772 774
773 err = register_candev(netdev); 775 err = register_candev(netdev);
774 if (err) { 776 if (err) {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index a0fa1fd5092b..ef674ecb82f8 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -697,8 +697,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
697 return NETDEV_TX_OK; 697 return NETDEV_TX_OK;
698 698
699nofreecontext: 699nofreecontext:
700 usb_unanchor_urb(urb);
701 usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); 700 usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
701 usb_free_urb(urb);
702 702
703 netdev_warn(netdev, "couldn't find free context"); 703 netdev_warn(netdev, "couldn't find free context");
704 704
@@ -887,6 +887,7 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
887 .ndo_open = usb_8dev_open, 887 .ndo_open = usb_8dev_open,
888 .ndo_stop = usb_8dev_close, 888 .ndo_stop = usb_8dev_close,
889 .ndo_start_xmit = usb_8dev_start_xmit, 889 .ndo_start_xmit = usb_8dev_start_xmit,
890 .ndo_change_mtu = can_change_mtu,
890}; 891};
891 892
892static const struct can_bittiming_const usb_8dev_bittiming_const = { 893static const struct can_bittiming_const usb_8dev_bittiming_const = {
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bd8f84b0b894..0932ffbf381b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -63,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
63 63
64 dstats = per_cpu_ptr(dev->dstats, i); 64 dstats = per_cpu_ptr(dev->dstats, i);
65 do { 65 do {
66 start = u64_stats_fetch_begin_bh(&dstats->syncp); 66 start = u64_stats_fetch_begin_irq(&dstats->syncp);
67 tbytes = dstats->tx_bytes; 67 tbytes = dstats->tx_bytes;
68 tpackets = dstats->tx_packets; 68 tpackets = dstats->tx_packets;
69 } while (u64_stats_fetch_retry_bh(&dstats->syncp, start)); 69 } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
70 stats->tx_bytes += tbytes; 70 stats->tx_bytes += tbytes;
71 stats->tx_packets += tpackets; 71 stats->tx_packets += tpackets;
72 } 72 }
@@ -88,16 +88,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
88 88
89static int dummy_dev_init(struct net_device *dev) 89static int dummy_dev_init(struct net_device *dev)
90{ 90{
91 int i; 91 dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
92 dev->dstats = alloc_percpu(struct pcpu_dstats);
93 if (!dev->dstats) 92 if (!dev->dstats)
94 return -ENOMEM; 93 return -ENOMEM;
95 94
96 for_each_possible_cpu(i) {
97 struct pcpu_dstats *dstats;
98 dstats = per_cpu_ptr(dev->dstats, i);
99 u64_stats_init(&dstats->syncp);
100 }
101 return 0; 95 return 0;
102} 96}
103 97
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index c53384d41c96..35df0b9e6848 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
749 749
750 spin_unlock_irqrestore(&lp->lock, flags); 750 spin_unlock_irqrestore(&lp->lock, flags);
751 751
752 dev_kfree_skb (skb); 752 dev_consume_skb_any (skb);
753 753
754 /* Clear the Tx status stack. */ 754 /* Clear the Tx status stack. */
755 { 755 {
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5992860a39c9..063557e037f2 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -1,23 +1,24 @@
1/*====================================================================== 1/* ======================================================================
2 2 *
3 A PCMCIA ethernet driver for the 3com 3c589 card. 3 * A PCMCIA ethernet driver for the 3com 3c589 card.
4 4 *
5 Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net 5 * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
6 6 *
7 3c589_cs.c 1.162 2001/10/13 00:08:50 7 * 3c589_cs.c 1.162 2001/10/13 00:08:50
8 8 *
9 The network driver code is based on Donald Becker's 3c589 code: 9 * The network driver code is based on Donald Becker's 3c589 code:
10 10 *
11 Written 1994 by Donald Becker. 11 * Written 1994 by Donald Becker.
12 Copyright 1993 United States Government as represented by the 12 * Copyright 1993 United States Government as represented by the
13 Director, National Security Agency. This software may be used and 13 * Director, National Security Agency. This software may be used and
14 distributed according to the terms of the GNU General Public License, 14 * distributed according to the terms of the GNU General Public License,
15 incorporated herein by reference. 15 * incorporated herein by reference.
16 Donald Becker may be reached at becker@scyld.com 16 * Donald Becker may be reached at becker@scyld.com
17 17 *
18 Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> 18 * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
19 19 *
20======================================================================*/ 20 * ======================================================================
21 */
21 22
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 24
@@ -41,18 +42,20 @@
41#include <linux/ioport.h> 42#include <linux/ioport.h>
42#include <linux/bitops.h> 43#include <linux/bitops.h>
43#include <linux/jiffies.h> 44#include <linux/jiffies.h>
45#include <linux/uaccess.h>
46#include <linux/io.h>
44 47
45#include <pcmcia/cistpl.h> 48#include <pcmcia/cistpl.h>
46#include <pcmcia/cisreg.h> 49#include <pcmcia/cisreg.h>
47#include <pcmcia/ciscode.h> 50#include <pcmcia/ciscode.h>
48#include <pcmcia/ds.h> 51#include <pcmcia/ds.h>
49 52
50#include <asm/uaccess.h>
51#include <asm/io.h>
52 53
53/* To minimize the size of the driver source I only define operating 54/* To minimize the size of the driver source I only define operating
54 constants if they are used several times. You'll need the manual 55 * constants if they are used several times. You'll need the manual
55 if you want to understand driver details. */ 56 * if you want to understand driver details.
57 */
58
56/* Offsets from base I/O address. */ 59/* Offsets from base I/O address. */
57#define EL3_DATA 0x00 60#define EL3_DATA 0x00
58#define EL3_TIMER 0x0a 61#define EL3_TIMER 0x0a
@@ -65,7 +68,9 @@
65#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) 68#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
66 69
67/* The top five bits written to EL3_CMD are a command, the lower 70/* The top five bits written to EL3_CMD are a command, the lower
68 11 bits are the parameter, if applicable. */ 71 * 11 bits are the parameter, if applicable.
72 */
73
69enum c509cmd { 74enum c509cmd {
70 TotalReset = 0<<11, 75 TotalReset = 0<<11,
71 SelectWindow = 1<<11, 76 SelectWindow = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
190 195
191static int tc589_probe(struct pcmcia_device *link) 196static int tc589_probe(struct pcmcia_device *link)
192{ 197{
193 struct el3_private *lp; 198 struct el3_private *lp;
194 struct net_device *dev; 199 struct net_device *dev;
195 200
196 dev_dbg(&link->dev, "3c589_attach()\n"); 201 dev_dbg(&link->dev, "3c589_attach()\n");
197 202
198 /* Create new ethernet device */ 203 /* Create new ethernet device */
199 dev = alloc_etherdev(sizeof(struct el3_private)); 204 dev = alloc_etherdev(sizeof(struct el3_private));
200 if (!dev) 205 if (!dev)
201 return -ENOMEM; 206 return -ENOMEM;
202 lp = netdev_priv(dev); 207 lp = netdev_priv(dev);
203 link->priv = dev; 208 link->priv = dev;
204 lp->p_dev = link; 209 lp->p_dev = link;
205 210
206 spin_lock_init(&lp->lock); 211 spin_lock_init(&lp->lock);
207 link->resource[0]->end = 16; 212 link->resource[0]->end = 16;
208 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; 213 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
209 214
210 link->config_flags |= CONF_ENABLE_IRQ; 215 link->config_flags |= CONF_ENABLE_IRQ;
211 link->config_index = 1; 216 link->config_index = 1;
212 217
213 dev->netdev_ops = &el3_netdev_ops; 218 dev->netdev_ops = &el3_netdev_ops;
214 dev->watchdog_timeo = TX_TIMEOUT; 219 dev->watchdog_timeo = TX_TIMEOUT;
215 220
216 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 221 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
217 222
218 return tc589_config(link); 223 return tc589_config(link);
219} 224}
220 225
221static void tc589_detach(struct pcmcia_device *link) 226static void tc589_detach(struct pcmcia_device *link)
222{ 227{
223 struct net_device *dev = link->priv; 228 struct net_device *dev = link->priv;
224 229
225 dev_dbg(&link->dev, "3c589_detach\n"); 230 dev_dbg(&link->dev, "3c589_detach\n");
226 231
227 unregister_netdev(dev); 232 unregister_netdev(dev);
228 233
229 tc589_release(link); 234 tc589_release(link);
230 235
231 free_netdev(dev); 236 free_netdev(dev);
232} /* tc589_detach */ 237} /* tc589_detach */
233 238
234static int tc589_config(struct pcmcia_device *link) 239static int tc589_config(struct pcmcia_device *link)
235{ 240{
236 struct net_device *dev = link->priv; 241 struct net_device *dev = link->priv;
237 __be16 *phys_addr; 242 __be16 *phys_addr;
238 int ret, i, j, multi = 0, fifo; 243 int ret, i, j, multi = 0, fifo;
239 unsigned int ioaddr; 244 unsigned int ioaddr;
240 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 245 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
241 u8 *buf; 246 u8 *buf;
242 size_t len; 247 size_t len;
243 248
244 dev_dbg(&link->dev, "3c589_config\n"); 249 dev_dbg(&link->dev, "3c589_config\n");
245 250
246 phys_addr = (__be16 *)dev->dev_addr; 251 phys_addr = (__be16 *)dev->dev_addr;
247 /* Is this a 3c562? */ 252 /* Is this a 3c562? */
248 if (link->manf_id != MANFID_3COM) 253 if (link->manf_id != MANFID_3COM)
249 dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); 254 dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
250 multi = (link->card_id == PRODID_3COM_3C562); 255 multi = (link->card_id == PRODID_3COM_3C562);
251 256
252 link->io_lines = 16; 257 link->io_lines = 16;
253 258
254 /* For the 3c562, the base address must be xx00-xx7f */ 259 /* For the 3c562, the base address must be xx00-xx7f */
255 for (i = j = 0; j < 0x400; j += 0x10) { 260 for (i = j = 0; j < 0x400; j += 0x10) {
256 if (multi && (j & 0x80)) continue; 261 if (multi && (j & 0x80))
257 link->resource[0]->start = j ^ 0x300; 262 continue;
258 i = pcmcia_request_io(link); 263 link->resource[0]->start = j ^ 0x300;
259 if (i == 0) 264 i = pcmcia_request_io(link);
260 break; 265 if (i == 0)
261 } 266 break;
262 if (i != 0)
263 goto failed;
264
265 ret = pcmcia_request_irq(link, el3_interrupt);
266 if (ret)
267 goto failed;
268
269 ret = pcmcia_enable_device(link);
270 if (ret)
271 goto failed;
272
273 dev->irq = link->irq;
274 dev->base_addr = link->resource[0]->start;
275 ioaddr = dev->base_addr;
276 EL3WINDOW(0);
277
278 /* The 3c589 has an extra EEPROM for configuration info, including
279 the hardware address. The 3c562 puts the address in the CIS. */
280 len = pcmcia_get_tuple(link, 0x88, &buf);
281 if (buf && len >= 6) {
282 for (i = 0; i < 3; i++)
283 phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
284 kfree(buf);
285 } else {
286 kfree(buf); /* 0 < len < 6 */
287 for (i = 0; i < 3; i++)
288 phys_addr[i] = htons(read_eeprom(ioaddr, i));
289 if (phys_addr[0] == htons(0x6060)) {
290 dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
291 dev->base_addr, dev->base_addr+15);
292 goto failed;
293 } 267 }
294 } 268 if (i != 0)
295 269 goto failed;
296 /* The address and resource configuration register aren't loaded from 270
297 the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */ 271 ret = pcmcia_request_irq(link, el3_interrupt);
298 outw(0x3f00, ioaddr + 8); 272 if (ret)
299 fifo = inl(ioaddr); 273 goto failed;
300 274
301 /* The if_port symbol can be set when the module is loaded */ 275 ret = pcmcia_enable_device(link);
302 if ((if_port >= 0) && (if_port <= 3)) 276 if (ret)
303 dev->if_port = if_port; 277 goto failed;
304 else 278
305 dev_err(&link->dev, "invalid if_port requested\n"); 279 dev->irq = link->irq;
306 280 dev->base_addr = link->resource[0]->start;
307 SET_NETDEV_DEV(dev, &link->dev); 281 ioaddr = dev->base_addr;
308 282 EL3WINDOW(0);
309 if (register_netdev(dev) != 0) { 283
310 dev_err(&link->dev, "register_netdev() failed\n"); 284 /* The 3c589 has an extra EEPROM for configuration info, including
311 goto failed; 285 * the hardware address. The 3c562 puts the address in the CIS.
312 } 286 */
313 287 len = pcmcia_get_tuple(link, 0x88, &buf);
314 netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", 288 if (buf && len >= 6) {
315 (multi ? "562" : "589"), dev->base_addr, dev->irq, 289 for (i = 0; i < 3; i++)
316 dev->dev_addr); 290 phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
317 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", 291 kfree(buf);
318 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], 292 } else {
319 if_names[dev->if_port]); 293 kfree(buf); /* 0 < len < 6 */
320 return 0; 294 for (i = 0; i < 3; i++)
295 phys_addr[i] = htons(read_eeprom(ioaddr, i));
296 if (phys_addr[0] == htons(0x6060)) {
297 dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
298 dev->base_addr, dev->base_addr+15);
299 goto failed;
300 }
301 }
302
303 /* The address and resource configuration register aren't loaded from
304 * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
305 */
306
307 outw(0x3f00, ioaddr + 8);
308 fifo = inl(ioaddr);
309
310 /* The if_port symbol can be set when the module is loaded */
311 if ((if_port >= 0) && (if_port <= 3))
312 dev->if_port = if_port;
313 else
314 dev_err(&link->dev, "invalid if_port requested\n");
315
316 SET_NETDEV_DEV(dev, &link->dev);
317
318 if (register_netdev(dev) != 0) {
319 dev_err(&link->dev, "register_netdev() failed\n");
320 goto failed;
321 }
322
323 netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
324 (multi ? "562" : "589"), dev->base_addr, dev->irq,
325 dev->dev_addr);
326 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
327 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
328 if_names[dev->if_port]);
329 return 0;
321 330
322failed: 331failed:
323 tc589_release(link); 332 tc589_release(link);
324 return -ENODEV; 333 return -ENODEV;
325} /* tc589_config */ 334} /* tc589_config */
326 335
327static void tc589_release(struct pcmcia_device *link) 336static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
353 362
354/*====================================================================*/ 363/*====================================================================*/
355 364
356/* 365/* Use this for commands that may take time to finish */
357 Use this for commands that may take time to finish 366
358*/
359static void tc589_wait_for_completion(struct net_device *dev, int cmd) 367static void tc589_wait_for_completion(struct net_device *dev, int cmd)
360{ 368{
361 int i = 100; 369 int i = 100;
362 outw(cmd, dev->base_addr + EL3_CMD); 370 outw(cmd, dev->base_addr + EL3_CMD);
363 while (--i > 0) 371 while (--i > 0)
364 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; 372 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
365 if (i == 0) 373 break;
366 netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); 374 if (i == 0)
375 netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
367} 376}
368 377
369/* 378/* Read a word from the EEPROM using the regular EEPROM access register.
370 Read a word from the EEPROM using the regular EEPROM access register. 379 * Assume that we are in register window zero.
371 Assume that we are in register window zero. 380 */
372*/ 381
373static u16 read_eeprom(unsigned int ioaddr, int index) 382static u16 read_eeprom(unsigned int ioaddr, int index)
374{ 383{
375 int i; 384 int i;
376 outw(EEPROM_READ + index, ioaddr + 10); 385 outw(EEPROM_READ + index, ioaddr + 10);
377 /* Reading the eeprom takes 162 us */ 386 /* Reading the eeprom takes 162 us */
378 for (i = 1620; i >= 0; i--) 387 for (i = 1620; i >= 0; i--)
379 if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) 388 if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
380 break; 389 break;
381 return inw(ioaddr + 12); 390 return inw(ioaddr + 12);
382} 391}
383 392
384/* 393/* Set transceiver type, perhaps to something other than what the user
385 Set transceiver type, perhaps to something other than what the user 394 * specified in dev->if_port.
386 specified in dev->if_port. 395 */
387*/ 396
388static void tc589_set_xcvr(struct net_device *dev, int if_port) 397static void tc589_set_xcvr(struct net_device *dev, int if_port)
389{ 398{
390 struct el3_private *lp = netdev_priv(dev); 399 struct el3_private *lp = netdev_priv(dev);
391 unsigned int ioaddr = dev->base_addr; 400 unsigned int ioaddr = dev->base_addr;
392 401
393 EL3WINDOW(0); 402 EL3WINDOW(0);
394 switch (if_port) { 403 switch (if_port) {
395 case 0: case 1: outw(0, ioaddr + 6); break; 404 case 0:
396 case 2: outw(3<<14, ioaddr + 6); break; 405 case 1:
397 case 3: outw(1<<14, ioaddr + 6); break; 406 outw(0, ioaddr + 6);
398 } 407 break;
399 /* On PCMCIA, this just turns on the LED */ 408 case 2:
400 outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD); 409 outw(3<<14, ioaddr + 6);
401 /* 10baseT interface, enable link beat and jabber check. */ 410 break;
402 EL3WINDOW(4); 411 case 3:
403 outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA); 412 outw(1<<14, ioaddr + 6);
404 EL3WINDOW(1); 413 break;
405 if (if_port == 2) 414 }
406 lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000); 415 /* On PCMCIA, this just turns on the LED */
407 else 416 outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
408 lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800); 417 /* 10baseT interface, enable link beat and jabber check. */
418 EL3WINDOW(4);
419 outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
420 EL3WINDOW(1);
421 if (if_port == 2)
422 lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
423 else
424 lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
409} 425}
410 426
411static void dump_status(struct net_device *dev) 427static void dump_status(struct net_device *dev)
412{ 428{
413 unsigned int ioaddr = dev->base_addr; 429 unsigned int ioaddr = dev->base_addr;
414 EL3WINDOW(1); 430 EL3WINDOW(1);
415 netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", 431 netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
416 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), 432 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
417 inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); 433 inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
418 EL3WINDOW(4); 434 EL3WINDOW(4);
419 netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", 435 netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
420 inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), 436 inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
421 inw(ioaddr+0x0a)); 437 inw(ioaddr+0x0a));
422 EL3WINDOW(1); 438 EL3WINDOW(1);
423} 439}
424 440
425/* Reset and restore all of the 3c589 registers. */ 441/* Reset and restore all of the 3c589 registers. */
426static void tc589_reset(struct net_device *dev) 442static void tc589_reset(struct net_device *dev)
427{ 443{
428 unsigned int ioaddr = dev->base_addr; 444 unsigned int ioaddr = dev->base_addr;
429 int i; 445 int i;
430 446
431 EL3WINDOW(0); 447 EL3WINDOW(0);
432 outw(0x0001, ioaddr + 4); /* Activate board. */ 448 outw(0x0001, ioaddr + 4); /* Activate board. */
433 outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ 449 outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
434 450
435 /* Set the station address in window 2. */ 451 /* Set the station address in window 2. */
436 EL3WINDOW(2); 452 EL3WINDOW(2);
437 for (i = 0; i < 6; i++) 453 for (i = 0; i < 6; i++)
438 outb(dev->dev_addr[i], ioaddr + i); 454 outb(dev->dev_addr[i], ioaddr + i);
439 455
440 tc589_set_xcvr(dev, dev->if_port); 456 tc589_set_xcvr(dev, dev->if_port);
441 457
442 /* Switch to the stats window, and clear all stats by reading. */ 458 /* Switch to the stats window, and clear all stats by reading. */
443 outw(StatsDisable, ioaddr + EL3_CMD); 459 outw(StatsDisable, ioaddr + EL3_CMD);
444 EL3WINDOW(6); 460 EL3WINDOW(6);
445 for (i = 0; i < 9; i++) 461 for (i = 0; i < 9; i++)
446 inb(ioaddr+i); 462 inb(ioaddr+i);
447 inw(ioaddr + 10); 463 inw(ioaddr + 10);
448 inw(ioaddr + 12); 464 inw(ioaddr + 12);
449 465
450 /* Switch to register set 1 for normal use. */ 466 /* Switch to register set 1 for normal use. */
451 EL3WINDOW(1); 467 EL3WINDOW(1);
452 468
453 set_rx_mode(dev); 469 set_rx_mode(dev);
454 outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ 470 outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
455 outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ 471 outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
456 outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ 472 outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
457 /* Allow status bits to be seen. */ 473 /* Allow status bits to be seen. */
458 outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); 474 outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
459 /* Ack all pending events, and set active indicator mask. */ 475 /* Ack all pending events, and set active indicator mask. */
460 outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, 476 outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
461 ioaddr + EL3_CMD); 477 ioaddr + EL3_CMD);
462 outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull 478 outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
463 | AdapterFailure, ioaddr + EL3_CMD); 479 | AdapterFailure, ioaddr + EL3_CMD);
464} 480}
465 481
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
478 494
479static int el3_config(struct net_device *dev, struct ifmap *map) 495static int el3_config(struct net_device *dev, struct ifmap *map)
480{ 496{
481 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { 497 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
482 if (map->port <= 3) { 498 if (map->port <= 3) {
483 dev->if_port = map->port; 499 dev->if_port = map->port;
484 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); 500 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
485 tc589_set_xcvr(dev, dev->if_port); 501 tc589_set_xcvr(dev, dev->if_port);
486 } else 502 } else {
487 return -EINVAL; 503 return -EINVAL;
488 } 504 }
489 return 0; 505 }
506 return 0;
490} 507}
491 508
492static int el3_open(struct net_device *dev) 509static int el3_open(struct net_device *dev)
493{ 510{
494 struct el3_private *lp = netdev_priv(dev); 511 struct el3_private *lp = netdev_priv(dev);
495 struct pcmcia_device *link = lp->p_dev; 512 struct pcmcia_device *link = lp->p_dev;
496 513
497 if (!pcmcia_dev_present(link)) 514 if (!pcmcia_dev_present(link))
498 return -ENODEV; 515 return -ENODEV;
499 516
500 link->open++; 517 link->open++;
501 netif_start_queue(dev); 518 netif_start_queue(dev);
502 519
503 tc589_reset(dev); 520 tc589_reset(dev);
504 init_timer(&lp->media); 521 init_timer(&lp->media);
505 lp->media.function = media_check; 522 lp->media.function = media_check;
506 lp->media.data = (unsigned long) dev; 523 lp->media.data = (unsigned long) dev;
507 lp->media.expires = jiffies + HZ; 524 lp->media.expires = jiffies + HZ;
508 add_timer(&lp->media); 525 add_timer(&lp->media);
509 526
510 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", 527 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
511 dev->name, inw(dev->base_addr + EL3_STATUS)); 528 dev->name, inw(dev->base_addr + EL3_STATUS));
512 529
513 return 0; 530 return 0;
514} 531}
515 532
516static void el3_tx_timeout(struct net_device *dev) 533static void el3_tx_timeout(struct net_device *dev)
517{ 534{
518 unsigned int ioaddr = dev->base_addr; 535 unsigned int ioaddr = dev->base_addr;
519 536
520 netdev_warn(dev, "Transmit timed out!\n"); 537 netdev_warn(dev, "Transmit timed out!\n");
521 dump_status(dev); 538 dump_status(dev);
522 dev->stats.tx_errors++; 539 dev->stats.tx_errors++;
523 dev->trans_start = jiffies; /* prevent tx timeout */ 540 dev->trans_start = jiffies; /* prevent tx timeout */
524 /* Issue TX_RESET and TX_START commands. */ 541 /* Issue TX_RESET and TX_START commands. */
525 tc589_wait_for_completion(dev, TxReset); 542 tc589_wait_for_completion(dev, TxReset);
526 outw(TxEnable, ioaddr + EL3_CMD); 543 outw(TxEnable, ioaddr + EL3_CMD);
527 netif_wake_queue(dev); 544 netif_wake_queue(dev);
528} 545}
529 546
530static void pop_tx_status(struct net_device *dev) 547static void pop_tx_status(struct net_device *dev)
531{ 548{
532 unsigned int ioaddr = dev->base_addr; 549 unsigned int ioaddr = dev->base_addr;
533 int i; 550 int i;
534 551
535 /* Clear the Tx status stack. */ 552 /* Clear the Tx status stack. */
536 for (i = 32; i > 0; i--) { 553 for (i = 32; i > 0; i--) {
537 u_char tx_status = inb(ioaddr + TX_STATUS); 554 u_char tx_status = inb(ioaddr + TX_STATUS);
538 if (!(tx_status & 0x84)) break; 555 if (!(tx_status & 0x84))
539 /* reset transmitter on jabber error or underrun */ 556 break;
540 if (tx_status & 0x30) 557 /* reset transmitter on jabber error or underrun */
541 tc589_wait_for_completion(dev, TxReset); 558 if (tx_status & 0x30)
542 if (tx_status & 0x38) { 559 tc589_wait_for_completion(dev, TxReset);
543 netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); 560 if (tx_status & 0x38) {
544 outw(TxEnable, ioaddr + EL3_CMD); 561 netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
545 dev->stats.tx_aborted_errors++; 562 outw(TxEnable, ioaddr + EL3_CMD);
563 dev->stats.tx_aborted_errors++;
564 }
565 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
546 } 566 }
547 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
548 }
549} 567}
550 568
551static netdev_tx_t el3_start_xmit(struct sk_buff *skb, 569static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
552 struct net_device *dev) 570 struct net_device *dev)
553{ 571{
554 unsigned int ioaddr = dev->base_addr; 572 unsigned int ioaddr = dev->base_addr;
555 struct el3_private *priv = netdev_priv(dev); 573 struct el3_private *priv = netdev_priv(dev);
556 unsigned long flags; 574 unsigned long flags;
557 575
558 netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", 576 netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
559 (long)skb->len, inw(ioaddr + EL3_STATUS)); 577 (long)skb->len, inw(ioaddr + EL3_STATUS));
560 578
561 spin_lock_irqsave(&priv->lock, flags); 579 spin_lock_irqsave(&priv->lock, flags);
562 580
563 dev->stats.tx_bytes += skb->len; 581 dev->stats.tx_bytes += skb->len;
564 582
565 /* Put out the doubleword header... */ 583 /* Put out the doubleword header... */
566 outw(skb->len, ioaddr + TX_FIFO); 584 outw(skb->len, ioaddr + TX_FIFO);
567 outw(0x00, ioaddr + TX_FIFO); 585 outw(0x00, ioaddr + TX_FIFO);
568 /* ... and the packet rounded to a doubleword. */ 586 /* ... and the packet rounded to a doubleword. */
569 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); 587 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
570 588
571 if (inw(ioaddr + TX_FREE) <= 1536) { 589 if (inw(ioaddr + TX_FREE) <= 1536) {
572 netif_stop_queue(dev); 590 netif_stop_queue(dev);
573 /* Interrupt us when the FIFO has room for max-sized packet. */ 591 /* Interrupt us when the FIFO has room for max-sized packet. */
574 outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); 592 outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
575 } 593 }
576 594
577 pop_tx_status(dev); 595 pop_tx_status(dev);
578 spin_unlock_irqrestore(&priv->lock, flags); 596 spin_unlock_irqrestore(&priv->lock, flags);
579 dev_kfree_skb(skb); 597 dev_kfree_skb(skb);
580 598
581 return NETDEV_TX_OK; 599 return NETDEV_TX_OK;
582} 600}
583 601
584/* The EL3 interrupt handler. */ 602/* The EL3 interrupt handler. */
585static irqreturn_t el3_interrupt(int irq, void *dev_id) 603static irqreturn_t el3_interrupt(int irq, void *dev_id)
586{ 604{
587 struct net_device *dev = (struct net_device *) dev_id; 605 struct net_device *dev = (struct net_device *) dev_id;
588 struct el3_private *lp = netdev_priv(dev); 606 struct el3_private *lp = netdev_priv(dev);
589 unsigned int ioaddr; 607 unsigned int ioaddr;
590 __u16 status; 608 __u16 status;
591 int i = 0, handled = 1; 609 int i = 0, handled = 1;
592 610
593 if (!netif_device_present(dev)) 611 if (!netif_device_present(dev))
594 return IRQ_NONE; 612 return IRQ_NONE;
595 613
596 ioaddr = dev->base_addr; 614 ioaddr = dev->base_addr;
597 615
598 netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); 616 netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
599 617
600 spin_lock(&lp->lock); 618 spin_lock(&lp->lock);
601 while ((status = inw(ioaddr + EL3_STATUS)) & 619 while ((status = inw(ioaddr + EL3_STATUS)) &
602 (IntLatch | RxComplete | StatsFull)) { 620 (IntLatch | RxComplete | StatsFull)) {
603 if ((status & 0xe000) != 0x2000) { 621 if ((status & 0xe000) != 0x2000) {
604 netdev_dbg(dev, "interrupt from dead card\n"); 622 netdev_dbg(dev, "interrupt from dead card\n");
605 handled = 0; 623 handled = 0;
606 break; 624 break;
607 } 625 }
608 if (status & RxComplete) 626 if (status & RxComplete)
609 el3_rx(dev); 627 el3_rx(dev);
610 if (status & TxAvailable) { 628 if (status & TxAvailable) {
611 netdev_dbg(dev, " TX room bit was handled.\n"); 629 netdev_dbg(dev, " TX room bit was handled.\n");
612 /* There's room in the FIFO for a full-sized packet. */ 630 /* There's room in the FIFO for a full-sized packet. */
613 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 631 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
614 netif_wake_queue(dev); 632 netif_wake_queue(dev);
615 } 633 }
616 if (status & TxComplete) 634 if (status & TxComplete)
617 pop_tx_status(dev); 635 pop_tx_status(dev);
618 if (status & (AdapterFailure | RxEarly | StatsFull)) { 636 if (status & (AdapterFailure | RxEarly | StatsFull)) {
619 /* Handle all uncommon interrupts. */ 637 /* Handle all uncommon interrupts. */
620 if (status & StatsFull) /* Empty statistics. */ 638 if (status & StatsFull) /* Empty statistics. */
621 update_stats(dev); 639 update_stats(dev);
622 if (status & RxEarly) { /* Rx early is unused. */ 640 if (status & RxEarly) {
623 el3_rx(dev); 641 /* Rx early is unused. */
624 outw(AckIntr | RxEarly, ioaddr + EL3_CMD); 642 el3_rx(dev);
625 } 643 outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
626 if (status & AdapterFailure) { 644 }
627 u16 fifo_diag; 645 if (status & AdapterFailure) {
628 EL3WINDOW(4); 646 u16 fifo_diag;
629 fifo_diag = inw(ioaddr + 4); 647 EL3WINDOW(4);
630 EL3WINDOW(1); 648 fifo_diag = inw(ioaddr + 4);
631 netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", 649 EL3WINDOW(1);
650 netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
632 fifo_diag); 651 fifo_diag);
633 if (fifo_diag & 0x0400) { 652 if (fifo_diag & 0x0400) {
634 /* Tx overrun */ 653 /* Tx overrun */
635 tc589_wait_for_completion(dev, TxReset); 654 tc589_wait_for_completion(dev, TxReset);
636 outw(TxEnable, ioaddr + EL3_CMD); 655 outw(TxEnable, ioaddr + EL3_CMD);
656 }
657 if (fifo_diag & 0x2000) {
658 /* Rx underrun */
659 tc589_wait_for_completion(dev, RxReset);
660 set_rx_mode(dev);
661 outw(RxEnable, ioaddr + EL3_CMD);
662 }
663 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
664 }
637 } 665 }
638 if (fifo_diag & 0x2000) { 666 if (++i > 10) {
639 /* Rx underrun */ 667 netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
640 tc589_wait_for_completion(dev, RxReset); 668 status);
641 set_rx_mode(dev); 669 /* Clear all interrupts */
642 outw(RxEnable, ioaddr + EL3_CMD); 670 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
671 break;
643 } 672 }
644 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); 673 /* Acknowledge the IRQ. */
645 } 674 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
646 } 675 }
647 if (++i > 10) { 676 lp->last_irq = jiffies;
648 netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", 677 spin_unlock(&lp->lock);
649 status); 678 netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
650 /* Clear all interrupts */ 679 inw(ioaddr + EL3_STATUS));
651 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 680 return IRQ_RETVAL(handled);
652 break;
653 }
654 /* Acknowledge the IRQ. */
655 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
656 }
657 lp->last_irq = jiffies;
658 spin_unlock(&lp->lock);
659 netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
660 inw(ioaddr + EL3_STATUS));
661 return IRQ_RETVAL(handled);
662} 681}
663 682
664static void media_check(unsigned long arg) 683static void media_check(unsigned long arg)
665{ 684{
666 struct net_device *dev = (struct net_device *)(arg); 685 struct net_device *dev = (struct net_device *)(arg);
667 struct el3_private *lp = netdev_priv(dev); 686 struct el3_private *lp = netdev_priv(dev);
668 unsigned int ioaddr = dev->base_addr; 687 unsigned int ioaddr = dev->base_addr;
669 u16 media, errs; 688 u16 media, errs;
670 unsigned long flags; 689 unsigned long flags;
671 690
672 if (!netif_device_present(dev)) goto reschedule; 691 if (!netif_device_present(dev))
692 goto reschedule;
673 693
674 /* Check for pending interrupt with expired latency timer: with 694 /* Check for pending interrupt with expired latency timer: with
675 this, we can limp along even if the interrupt is blocked */ 695 * this, we can limp along even if the interrupt is blocked
676 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && 696 */
697 if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
677 (inb(ioaddr + EL3_TIMER) == 0xff)) { 698 (inb(ioaddr + EL3_TIMER) == 0xff)) {
678 if (!lp->fast_poll) 699 if (!lp->fast_poll)
679 netdev_warn(dev, "interrupt(s) dropped!\n"); 700 netdev_warn(dev, "interrupt(s) dropped!\n");
680 701
681 local_irq_save(flags); 702 local_irq_save(flags);
682 el3_interrupt(dev->irq, dev); 703 el3_interrupt(dev->irq, dev);
683 local_irq_restore(flags); 704 local_irq_restore(flags);
684 705
685 lp->fast_poll = HZ; 706 lp->fast_poll = HZ;
686 } 707 }
687 if (lp->fast_poll) { 708 if (lp->fast_poll) {
688 lp->fast_poll--; 709 lp->fast_poll--;
689 lp->media.expires = jiffies + HZ/100; 710 lp->media.expires = jiffies + HZ/100;
690 add_timer(&lp->media); 711 add_timer(&lp->media);
691 return; 712 return;
692 } 713 }
693 714
694 /* lp->lock guards the EL3 window. Window should always be 1 except 715 /* lp->lock guards the EL3 window. Window should always be 1 except
695 when the lock is held */ 716 * when the lock is held
696 spin_lock_irqsave(&lp->lock, flags); 717 */
697 EL3WINDOW(4); 718
698 media = inw(ioaddr+WN4_MEDIA) & 0xc810; 719 spin_lock_irqsave(&lp->lock, flags);
699 720 EL3WINDOW(4);
700 /* Ignore collisions unless we've had no irq's recently */ 721 media = inw(ioaddr+WN4_MEDIA) & 0xc810;
701 if (time_before(jiffies, lp->last_irq + HZ)) { 722
702 media &= ~0x0010; 723 /* Ignore collisions unless we've had no irq's recently */
703 } else { 724 if (time_before(jiffies, lp->last_irq + HZ)) {
704 /* Try harder to detect carrier errors */ 725 media &= ~0x0010;
705 EL3WINDOW(6); 726 } else {
706 outw(StatsDisable, ioaddr + EL3_CMD); 727 /* Try harder to detect carrier errors */
707 errs = inb(ioaddr + 0); 728 EL3WINDOW(6);
708 outw(StatsEnable, ioaddr + EL3_CMD); 729 outw(StatsDisable, ioaddr + EL3_CMD);
709 dev->stats.tx_carrier_errors += errs; 730 errs = inb(ioaddr + 0);
710 if (errs || (lp->media_status & 0x0010)) media |= 0x0010; 731 outw(StatsEnable, ioaddr + EL3_CMD);
711 } 732 dev->stats.tx_carrier_errors += errs;
733 if (errs || (lp->media_status & 0x0010))
734 media |= 0x0010;
735 }
712 736
713 if (media != lp->media_status) { 737 if (media != lp->media_status) {
714 if ((media & lp->media_status & 0x8000) && 738 if ((media & lp->media_status & 0x8000) &&
715 ((lp->media_status ^ media) & 0x0800)) 739 ((lp->media_status ^ media) & 0x0800))
716 netdev_info(dev, "%s link beat\n", 740 netdev_info(dev, "%s link beat\n",
717 (lp->media_status & 0x0800 ? "lost" : "found")); 741 (lp->media_status & 0x0800 ? "lost" : "found"));
718 else if ((media & lp->media_status & 0x4000) && 742 else if ((media & lp->media_status & 0x4000) &&
719 ((lp->media_status ^ media) & 0x0010)) 743 ((lp->media_status ^ media) & 0x0010))
720 netdev_info(dev, "coax cable %s\n", 744 netdev_info(dev, "coax cable %s\n",
721 (lp->media_status & 0x0010 ? "ok" : "problem")); 745 (lp->media_status & 0x0010 ? "ok" : "problem"));
722 if (dev->if_port == 0) { 746 if (dev->if_port == 0) {
723 if (media & 0x8000) { 747 if (media & 0x8000) {
724 if (media & 0x0800) 748 if (media & 0x0800)
725 netdev_info(dev, "flipped to 10baseT\n"); 749 netdev_info(dev, "flipped to 10baseT\n");
726 else 750 else
727 tc589_set_xcvr(dev, 2); 751 tc589_set_xcvr(dev, 2);
728 } else if (media & 0x4000) { 752 } else if (media & 0x4000) {
729 if (media & 0x0010) 753 if (media & 0x0010)
730 tc589_set_xcvr(dev, 1); 754 tc589_set_xcvr(dev, 1);
731 else 755 else
732 netdev_info(dev, "flipped to 10base2\n"); 756 netdev_info(dev, "flipped to 10base2\n");
733 } 757 }
758 }
759 lp->media_status = media;
734 } 760 }
735 lp->media_status = media;
736 }
737 761
738 EL3WINDOW(1); 762 EL3WINDOW(1);
739 spin_unlock_irqrestore(&lp->lock, flags); 763 spin_unlock_irqrestore(&lp->lock, flags);
740 764
741reschedule: 765reschedule:
742 lp->media.expires = jiffies + HZ; 766 lp->media.expires = jiffies + HZ;
743 add_timer(&lp->media); 767 add_timer(&lp->media);
744} 768}
745 769
746static struct net_device_stats *el3_get_stats(struct net_device *dev) 770static struct net_device_stats *el3_get_stats(struct net_device *dev)
747{ 771{
748 struct el3_private *lp = netdev_priv(dev); 772 struct el3_private *lp = netdev_priv(dev);
749 unsigned long flags; 773 unsigned long flags;
750 struct pcmcia_device *link = lp->p_dev; 774 struct pcmcia_device *link = lp->p_dev;
751 775
752 if (pcmcia_dev_present(link)) { 776 if (pcmcia_dev_present(link)) {
753 spin_lock_irqsave(&lp->lock, flags); 777 spin_lock_irqsave(&lp->lock, flags);
754 update_stats(dev); 778 update_stats(dev);
755 spin_unlock_irqrestore(&lp->lock, flags); 779 spin_unlock_irqrestore(&lp->lock, flags);
756 } 780 }
757 return &dev->stats; 781 return &dev->stats;
758} 782}
759 783
760/* 784/* Update statistics. We change to register window 6, so this should be run
761 Update statistics. We change to register window 6, so this should be run 785* single-threaded if the device is active. This is expected to be a rare
762 single-threaded if the device is active. This is expected to be a rare 786* operation, and it's simpler for the rest of the driver to assume that
763 operation, and it's simpler for the rest of the driver to assume that 787* window 1 is always valid rather than use a special window-state variable.
764 window 1 is always valid rather than use a special window-state variable. 788*
765 789* Caller must hold the lock for this
766 Caller must hold the lock for this
767*/ 790*/
791
768static void update_stats(struct net_device *dev) 792static void update_stats(struct net_device *dev)
769{ 793{
770 unsigned int ioaddr = dev->base_addr; 794 unsigned int ioaddr = dev->base_addr;
771 795
772 netdev_dbg(dev, "updating the statistics.\n"); 796 netdev_dbg(dev, "updating the statistics.\n");
773 /* Turn off statistics updates while reading. */ 797 /* Turn off statistics updates while reading. */
774 outw(StatsDisable, ioaddr + EL3_CMD); 798 outw(StatsDisable, ioaddr + EL3_CMD);
775 /* Switch to the stats window, and read everything. */ 799 /* Switch to the stats window, and read everything. */
776 EL3WINDOW(6); 800 EL3WINDOW(6);
777 dev->stats.tx_carrier_errors += inb(ioaddr + 0); 801 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
778 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); 802 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
779 /* Multiple collisions. */ inb(ioaddr + 2); 803 /* Multiple collisions. */
780 dev->stats.collisions += inb(ioaddr + 3); 804 inb(ioaddr + 2);
781 dev->stats.tx_window_errors += inb(ioaddr + 4); 805 dev->stats.collisions += inb(ioaddr + 3);
782 dev->stats.rx_fifo_errors += inb(ioaddr + 5); 806 dev->stats.tx_window_errors += inb(ioaddr + 4);
783 dev->stats.tx_packets += inb(ioaddr + 6); 807 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
784 /* Rx packets */ inb(ioaddr + 7); 808 dev->stats.tx_packets += inb(ioaddr + 6);
785 /* Tx deferrals */ inb(ioaddr + 8); 809 /* Rx packets */
786 /* Rx octets */ inw(ioaddr + 10); 810 inb(ioaddr + 7);
787 /* Tx octets */ inw(ioaddr + 12); 811 /* Tx deferrals */
788 812 inb(ioaddr + 8);
789 /* Back to window 1, and turn statistics back on. */ 813 /* Rx octets */
790 EL3WINDOW(1); 814 inw(ioaddr + 10);
791 outw(StatsEnable, ioaddr + EL3_CMD); 815 /* Tx octets */
816 inw(ioaddr + 12);
817
818 /* Back to window 1, and turn statistics back on. */
819 EL3WINDOW(1);
820 outw(StatsEnable, ioaddr + EL3_CMD);
792} 821}
793 822
794static int el3_rx(struct net_device *dev) 823static int el3_rx(struct net_device *dev)
795{ 824{
796 unsigned int ioaddr = dev->base_addr; 825 unsigned int ioaddr = dev->base_addr;
797 int worklimit = 32; 826 int worklimit = 32;
798 short rx_status; 827 short rx_status;
799 828
800 netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", 829 netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
801 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); 830 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
802 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && 831 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
803 worklimit > 0) { 832 worklimit > 0) {
804 worklimit--; 833 worklimit--;
805 if (rx_status & 0x4000) { /* Error, update stats. */ 834 if (rx_status & 0x4000) { /* Error, update stats. */
806 short error = rx_status & 0x3800; 835 short error = rx_status & 0x3800;
807 dev->stats.rx_errors++; 836 dev->stats.rx_errors++;
808 switch (error) { 837 switch (error) {
809 case 0x0000: dev->stats.rx_over_errors++; break; 838 case 0x0000:
810 case 0x0800: dev->stats.rx_length_errors++; break; 839 dev->stats.rx_over_errors++;
811 case 0x1000: dev->stats.rx_frame_errors++; break; 840 break;
812 case 0x1800: dev->stats.rx_length_errors++; break; 841 case 0x0800:
813 case 0x2000: dev->stats.rx_frame_errors++; break; 842 dev->stats.rx_length_errors++;
814 case 0x2800: dev->stats.rx_crc_errors++; break; 843 break;
815 } 844 case 0x1000:
816 } else { 845 dev->stats.rx_frame_errors++;
817 short pkt_len = rx_status & 0x7ff; 846 break;
818 struct sk_buff *skb; 847 case 0x1800:
819 848 dev->stats.rx_length_errors++;
820 skb = netdev_alloc_skb(dev, pkt_len + 5); 849 break;
821 850 case 0x2000:
822 netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", 851 dev->stats.rx_frame_errors++;
852 break;
853 case 0x2800:
854 dev->stats.rx_crc_errors++;
855 break;
856 }
857 } else {
858 short pkt_len = rx_status & 0x7ff;
859 struct sk_buff *skb;
860
861 skb = netdev_alloc_skb(dev, pkt_len + 5);
862
863 netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
823 pkt_len, rx_status); 864 pkt_len, rx_status);
824 if (skb != NULL) { 865 if (skb != NULL) {
825 skb_reserve(skb, 2); 866 skb_reserve(skb, 2);
826 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), 867 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
827 (pkt_len+3)>>2); 868 (pkt_len+3)>>2);
828 skb->protocol = eth_type_trans(skb, dev); 869 skb->protocol = eth_type_trans(skb, dev);
829 netif_rx(skb); 870 netif_rx(skb);
830 dev->stats.rx_packets++; 871 dev->stats.rx_packets++;
831 dev->stats.rx_bytes += pkt_len; 872 dev->stats.rx_bytes += pkt_len;
832 } else { 873 } else {
833 netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", 874 netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
834 pkt_len); 875 pkt_len);
835 dev->stats.rx_dropped++; 876 dev->stats.rx_dropped++;
836 } 877 }
878 }
879 /* Pop the top of the Rx FIFO */
880 tc589_wait_for_completion(dev, RxDiscard);
837 } 881 }
838 /* Pop the top of the Rx FIFO */ 882 if (worklimit == 0)
839 tc589_wait_for_completion(dev, RxDiscard); 883 netdev_warn(dev, "too much work in el3_rx!\n");
840 } 884 return 0;
841 if (worklimit == 0)
842 netdev_warn(dev, "too much work in el3_rx!\n");
843 return 0;
844} 885}
845 886
846static void set_rx_mode(struct net_device *dev) 887static void set_rx_mode(struct net_device *dev)
847{ 888{
848 unsigned int ioaddr = dev->base_addr; 889 unsigned int ioaddr = dev->base_addr;
849 u16 opts = SetRxFilter | RxStation | RxBroadcast; 890 u16 opts = SetRxFilter | RxStation | RxBroadcast;
850 891
851 if (dev->flags & IFF_PROMISC) 892 if (dev->flags & IFF_PROMISC)
852 opts |= RxMulticast | RxProm; 893 opts |= RxMulticast | RxProm;
853 else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) 894 else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
854 opts |= RxMulticast; 895 opts |= RxMulticast;
855 outw(opts, ioaddr + EL3_CMD); 896 outw(opts, ioaddr + EL3_CMD);
856} 897}
857 898
858static void set_multicast_list(struct net_device *dev) 899static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
867 908
868static int el3_close(struct net_device *dev) 909static int el3_close(struct net_device *dev)
869{ 910{
870 struct el3_private *lp = netdev_priv(dev); 911 struct el3_private *lp = netdev_priv(dev);
871 struct pcmcia_device *link = lp->p_dev; 912 struct pcmcia_device *link = lp->p_dev;
872 unsigned int ioaddr = dev->base_addr; 913 unsigned int ioaddr = dev->base_addr;
873 914
874 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); 915 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
916
917 if (pcmcia_dev_present(link)) {
918 /* Turn off statistics ASAP. We update dev->stats below. */
919 outw(StatsDisable, ioaddr + EL3_CMD);
920
921 /* Disable the receiver and transmitter. */
922 outw(RxDisable, ioaddr + EL3_CMD);
923 outw(TxDisable, ioaddr + EL3_CMD);
924
925 if (dev->if_port == 2)
926 /* Turn off thinnet power. Green! */
927 outw(StopCoax, ioaddr + EL3_CMD);
928 else if (dev->if_port == 1) {
929 /* Disable link beat and jabber */
930 EL3WINDOW(4);
931 outw(0, ioaddr + WN4_MEDIA);
932 }
875 933
876 if (pcmcia_dev_present(link)) { 934 /* Switching back to window 0 disables the IRQ. */
877 /* Turn off statistics ASAP. We update dev->stats below. */ 935 EL3WINDOW(0);
878 outw(StatsDisable, ioaddr + EL3_CMD); 936 /* But we explicitly zero the IRQ line select anyway. */
937 outw(0x0f00, ioaddr + WN0_IRQ);
879 938
880 /* Disable the receiver and transmitter. */ 939 /* Check if the card still exists */
881 outw(RxDisable, ioaddr + EL3_CMD); 940 if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
882 outw(TxDisable, ioaddr + EL3_CMD); 941 update_stats(dev);
883
884 if (dev->if_port == 2)
885 /* Turn off thinnet power. Green! */
886 outw(StopCoax, ioaddr + EL3_CMD);
887 else if (dev->if_port == 1) {
888 /* Disable link beat and jabber */
889 EL3WINDOW(4);
890 outw(0, ioaddr + WN4_MEDIA);
891 } 942 }
892 943
893 /* Switching back to window 0 disables the IRQ. */ 944 link->open--;
894 EL3WINDOW(0); 945 netif_stop_queue(dev);
895 /* But we explicitly zero the IRQ line select anyway. */ 946 del_timer_sync(&lp->media);
896 outw(0x0f00, ioaddr + WN0_IRQ);
897
898 /* Check if the card still exists */
899 if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
900 update_stats(dev);
901 }
902
903 link->open--;
904 netif_stop_queue(dev);
905 del_timer_sync(&lp->media);
906 947
907 return 0; 948 return 0;
908} 949}
909 950
910static const struct pcmcia_device_id tc589_ids[] = { 951static const struct pcmcia_device_id tc589_ids[] = {
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 238ccea965c8..61477b8e8d24 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2086 /* ... and the packet rounded to a doubleword. */ 2086 /* ... and the packet rounded to a doubleword. */
2087 skb_tx_timestamp(skb); 2087 skb_tx_timestamp(skb);
2088 iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); 2088 iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
2089 dev_kfree_skb (skb); 2089 dev_consume_skb_any (skb);
2090 if (ioread16(ioaddr + TxFree) > 1536) { 2090 if (ioread16(ioaddr + TxFree) > 1536) {
2091 netif_start_queue (dev); /* AKPM: redundant? */ 2091 netif_start_queue (dev); /* AKPM: redundant? */
2092 } else { 2092 } else {
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index d2cd80444ade..599311f0e05c 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
404 spin_unlock(&ei_local->page_lock); 404 spin_unlock(&ei_local->page_lock);
405 enable_irq_lockdep_irqrestore(dev->irq, &flags); 405 enable_irq_lockdep_irqrestore(dev->irq, &flags);
406 skb_tx_timestamp(skb); 406 skb_tx_timestamp(skb);
407 dev_kfree_skb(skb); 407 dev_consume_skb_any(skb);
408 dev->stats.tx_bytes += send_length; 408 dev->stats.tx_bytes += send_length;
409 409
410 return NETDEV_TX_OK; 410 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 506b0248c400..39b26fe28d10 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kconfig"
22source "drivers/net/ethernet/aeroflex/Kconfig" 22source "drivers/net/ethernet/aeroflex/Kconfig"
23source "drivers/net/ethernet/allwinner/Kconfig" 23source "drivers/net/ethernet/allwinner/Kconfig"
24source "drivers/net/ethernet/alteon/Kconfig" 24source "drivers/net/ethernet/alteon/Kconfig"
25source "drivers/net/ethernet/altera/Kconfig"
25source "drivers/net/ethernet/amd/Kconfig" 26source "drivers/net/ethernet/amd/Kconfig"
26source "drivers/net/ethernet/apple/Kconfig" 27source "drivers/net/ethernet/apple/Kconfig"
27source "drivers/net/ethernet/arc/Kconfig" 28source "drivers/net/ethernet/arc/Kconfig"
@@ -149,6 +150,7 @@ config S6GMAC
149 To compile this driver as a module, choose M here. The module 150 To compile this driver as a module, choose M here. The module
150 will be called s6gmac. 151 will be called s6gmac.
151 152
153source "drivers/net/ethernet/samsung/Kconfig"
152source "drivers/net/ethernet/seeq/Kconfig" 154source "drivers/net/ethernet/seeq/Kconfig"
153source "drivers/net/ethernet/silan/Kconfig" 155source "drivers/net/ethernet/silan/Kconfig"
154source "drivers/net/ethernet/sis/Kconfig" 156source "drivers/net/ethernet/sis/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c0b8789952e7..545d0b3b9cb4 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
8obj-$(CONFIG_GRETH) += aeroflex/ 8obj-$(CONFIG_GRETH) += aeroflex/
9obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ 9obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
10obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ 10obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
11obj-$(CONFIG_ALTERA_TSE) += altera/
11obj-$(CONFIG_NET_VENDOR_AMD) += amd/ 12obj-$(CONFIG_NET_VENDOR_AMD) += amd/
12obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ 13obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
13obj-$(CONFIG_NET_VENDOR_ARC) += arc/ 14obj-$(CONFIG_NET_VENDOR_ARC) += arc/
@@ -60,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
60obj-$(CONFIG_SH_ETH) += renesas/ 61obj-$(CONFIG_SH_ETH) += renesas/
61obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ 62obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
62obj-$(CONFIG_S6GMAC) += s6gmac.o 63obj-$(CONFIG_S6GMAC) += s6gmac.o
64obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
63obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ 65obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
64obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ 66obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
65obj-$(CONFIG_NET_VENDOR_SIS) += sis/ 67obj-$(CONFIG_NET_VENDOR_SIS) += sis/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c0f68dcd1dc1..7ae74d450e8f 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -307,11 +307,6 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
307 return bfin_mdio_poll(); 307 return bfin_mdio_poll();
308} 308}
309 309
310static int bfin_mdiobus_reset(struct mii_bus *bus)
311{
312 return 0;
313}
314
315static void bfin_mac_adjust_link(struct net_device *dev) 310static void bfin_mac_adjust_link(struct net_device *dev)
316{ 311{
317 struct bfin_mac_local *lp = netdev_priv(dev); 312 struct bfin_mac_local *lp = netdev_priv(dev);
@@ -1040,6 +1035,7 @@ static struct ptp_clock_info bfin_ptp_caps = {
1040 .n_alarm = 0, 1035 .n_alarm = 0,
1041 .n_ext_ts = 0, 1036 .n_ext_ts = 0,
1042 .n_per_out = 0, 1037 .n_per_out = 0,
1038 .n_pins = 0,
1043 .pps = 0, 1039 .pps = 0,
1044 .adjfreq = bfin_ptp_adjfreq, 1040 .adjfreq = bfin_ptp_adjfreq,
1045 .adjtime = bfin_ptp_adjtime, 1041 .adjtime = bfin_ptp_adjtime,
@@ -1086,7 +1082,7 @@ static inline void _tx_reclaim_skb(void)
1086 tx_list_head->desc_a.config &= ~DMAEN; 1082 tx_list_head->desc_a.config &= ~DMAEN;
1087 tx_list_head->status.status_word = 0; 1083 tx_list_head->status.status_word = 0;
1088 if (tx_list_head->skb) { 1084 if (tx_list_head->skb) {
1089 dev_kfree_skb(tx_list_head->skb); 1085 dev_consume_skb_any(tx_list_head->skb);
1090 tx_list_head->skb = NULL; 1086 tx_list_head->skb = NULL;
1091 } 1087 }
1092 tx_list_head = tx_list_head->next; 1088 tx_list_head = tx_list_head->next;
@@ -1823,7 +1819,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev)
1823 goto out_err_alloc; 1819 goto out_err_alloc;
1824 miibus->read = bfin_mdiobus_read; 1820 miibus->read = bfin_mdiobus_read;
1825 miibus->write = bfin_mdiobus_write; 1821 miibus->write = bfin_mdiobus_write;
1826 miibus->reset = bfin_mdiobus_reset;
1827 1822
1828 miibus->parent = &pdev->dev; 1823 miibus->parent = &pdev->dev;
1829 miibus->name = "bfin_mii_bus"; 1824 miibus->name = "bfin_mii_bus";
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c5d75e7aeeb6..23578dfee249 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1213,11 +1213,6 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
1213 return 0; 1213 return 0;
1214} 1214}
1215 1215
1216static int greth_mdio_reset(struct mii_bus *bus)
1217{
1218 return 0;
1219}
1220
1221static void greth_link_change(struct net_device *dev) 1216static void greth_link_change(struct net_device *dev)
1222{ 1217{
1223 struct greth_private *greth = netdev_priv(dev); 1218 struct greth_private *greth = netdev_priv(dev);
@@ -1332,7 +1327,6 @@ static int greth_mdio_init(struct greth_private *greth)
1332 snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq); 1327 snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
1333 greth->mdio->read = greth_mdio_read; 1328 greth->mdio->read = greth_mdio_read;
1334 greth->mdio->write = greth_mdio_write; 1329 greth->mdio->write = greth_mdio_write;
1335 greth->mdio->reset = greth_mdio_reset;
1336 greth->mdio->priv = greth; 1330 greth->mdio->priv = greth;
1337 1331
1338 greth->mdio->irq = greth->mdio_irqs; 1332 greth->mdio->irq = greth->mdio_irqs;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 511f6eecd58b..fcaeeb8a4929 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
476 spin_unlock_irqrestore(&db->lock, flags); 476 spin_unlock_irqrestore(&db->lock, flags);
477 477
478 /* free this SKB */ 478 /* free this SKB */
479 dev_kfree_skb(skb); 479 dev_consume_skb_any(skb);
480 480
481 return NETDEV_TX_OK; 481 return NETDEV_TX_OK;
482} 482}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
new file mode 100644
index 000000000000..80c1ab74a4b8
--- /dev/null
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -0,0 +1,8 @@
1config ALTERA_TSE
2 tristate "Altera Triple-Speed Ethernet MAC support"
3 select PHYLIB
4 ---help---
5 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
6
7 To compile this driver as a module, choose M here. The module
8 will be called alteratse.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
new file mode 100644
index 000000000000..d4a187e45369
--- /dev/null
+++ b/drivers/net/ethernet/altera/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Altera device drivers.
3#
4
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
new file mode 100644
index 000000000000..3df18669ea30
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -0,0 +1,202 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/netdevice.h>
18#include "altera_utils.h"
19#include "altera_tse.h"
20#include "altera_msgdmahw.h"
21
22/* No initialization work to do for MSGDMA */
23int msgdma_initialize(struct altera_tse_private *priv)
24{
25 return 0;
26}
27
28void msgdma_uninitialize(struct altera_tse_private *priv)
29{
30}
31
32void msgdma_reset(struct altera_tse_private *priv)
33{
34 int counter;
35 struct msgdma_csr *txcsr =
36 (struct msgdma_csr *)priv->tx_dma_csr;
37 struct msgdma_csr *rxcsr =
38 (struct msgdma_csr *)priv->rx_dma_csr;
39
40 /* Reset Rx mSGDMA */
41 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
42 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
43
44 counter = 0;
45 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
46 if (tse_bit_is_clear(&rxcsr->status,
47 MSGDMA_CSR_STAT_RESETTING))
48 break;
49 udelay(1);
50 }
51
52 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
53 netif_warn(priv, drv, priv->dev,
54 "TSE Rx mSGDMA resetting bit never cleared!\n");
55
56 /* clear all status bits */
57 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
58
59 /* Reset Tx mSGDMA */
60 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
61 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
62
63 counter = 0;
64 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
65 if (tse_bit_is_clear(&txcsr->status,
66 MSGDMA_CSR_STAT_RESETTING))
67 break;
68 udelay(1);
69 }
70
71 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
72 netif_warn(priv, drv, priv->dev,
73 "TSE Tx mSGDMA resetting bit never cleared!\n");
74
75 /* clear all status bits */
76 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
77}
78
79void msgdma_disable_rxirq(struct altera_tse_private *priv)
80{
81 struct msgdma_csr *csr = priv->rx_dma_csr;
82 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
83}
84
85void msgdma_enable_rxirq(struct altera_tse_private *priv)
86{
87 struct msgdma_csr *csr = priv->rx_dma_csr;
88 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
89}
90
91void msgdma_disable_txirq(struct altera_tse_private *priv)
92{
93 struct msgdma_csr *csr = priv->tx_dma_csr;
94 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
95}
96
97void msgdma_enable_txirq(struct altera_tse_private *priv)
98{
99 struct msgdma_csr *csr = priv->tx_dma_csr;
100 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
101}
102
103void msgdma_clear_rxirq(struct altera_tse_private *priv)
104{
105 struct msgdma_csr *csr = priv->rx_dma_csr;
106 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
107}
108
109void msgdma_clear_txirq(struct altera_tse_private *priv)
110{
111 struct msgdma_csr *csr = priv->tx_dma_csr;
112 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
113}
114
115/* return 0 to indicate transmit is pending */
116int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
117{
118 struct msgdma_extended_desc *desc = priv->tx_dma_desc;
119
120 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
121 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
122 iowrite32(0, &desc->write_addr_lo);
123 iowrite32(0, &desc->write_addr_hi);
124 iowrite32(buffer->len, &desc->len);
125 iowrite32(0, &desc->burst_seq_num);
126 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
127 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
128 return 0;
129}
130
131u32 msgdma_tx_completions(struct altera_tse_private *priv)
132{
133 u32 ready = 0;
134 u32 inuse;
135 u32 status;
136 struct msgdma_csr *txcsr =
137 (struct msgdma_csr *)priv->tx_dma_csr;
138
139 /* Get number of sent descriptors */
140 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
141
142 if (inuse) { /* Tx FIFO is not empty */
143 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
144 } else {
145 /* Check for buffered last packet */
146 status = ioread32(&txcsr->status);
147 if (status & MSGDMA_CSR_STAT_BUSY)
148 ready = priv->tx_prod - priv->tx_cons - 1;
149 else
150 ready = priv->tx_prod - priv->tx_cons;
151 }
152 return ready;
153}
154
155/* Put buffer to the mSGDMA RX FIFO
156 */
157int msgdma_add_rx_desc(struct altera_tse_private *priv,
158 struct tse_buffer *rxbuffer)
159{
160 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
161 u32 len = priv->rx_dma_buf_sz;
162 dma_addr_t dma_addr = rxbuffer->dma_addr;
163 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
164 | MSGDMA_DESC_CTL_END_ON_LEN
165 | MSGDMA_DESC_CTL_TR_COMP_IRQ
166 | MSGDMA_DESC_CTL_EARLY_IRQ
167 | MSGDMA_DESC_CTL_TR_ERR_IRQ
168 | MSGDMA_DESC_CTL_GO);
169
170 iowrite32(0, &desc->read_addr_lo);
171 iowrite32(0, &desc->read_addr_hi);
172 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
173 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
174 iowrite32(len, &desc->len);
175 iowrite32(0, &desc->burst_seq_num);
176 iowrite32(0x00010001, &desc->stride);
177 iowrite32(control, &desc->control);
178 return 1;
179}
180
181/* status is returned on upper 16 bits,
182 * length is returned in lower 16 bits
183 */
184u32 msgdma_rx_status(struct altera_tse_private *priv)
185{
186 u32 rxstatus = 0;
187 u32 pktlength;
188 u32 pktstatus;
189 struct msgdma_csr *rxcsr =
190 (struct msgdma_csr *)priv->rx_dma_csr;
191 struct msgdma_response *rxresp =
192 (struct msgdma_response *)priv->rx_dma_resp;
193
194 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
195 pktlength = ioread32(&rxresp->bytes_transferred);
196 pktstatus = ioread32(&rxresp->status);
197 rxstatus = pktstatus;
198 rxstatus = rxstatus << 16;
199 rxstatus |= (pktlength & 0xffff);
200 }
201 return rxstatus;
202}
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
new file mode 100644
index 000000000000..7f0f5bf2bba2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -0,0 +1,34 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_MSGDMA_H__
18#define __ALTERA_MSGDMA_H__
19
20void msgdma_reset(struct altera_tse_private *);
21void msgdma_enable_txirq(struct altera_tse_private *);
22void msgdma_enable_rxirq(struct altera_tse_private *);
23void msgdma_disable_rxirq(struct altera_tse_private *);
24void msgdma_disable_txirq(struct altera_tse_private *);
25void msgdma_clear_rxirq(struct altera_tse_private *);
26void msgdma_clear_txirq(struct altera_tse_private *);
27u32 msgdma_tx_completions(struct altera_tse_private *);
28int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
30u32 msgdma_rx_status(struct altera_tse_private *);
31int msgdma_initialize(struct altera_tse_private *);
32void msgdma_uninitialize(struct altera_tse_private *);
33
34#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
new file mode 100644
index 000000000000..d7b59ba4019c
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -0,0 +1,167 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__
19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format
30 */
31struct msgdma_extended_desc {
32 u32 read_addr_lo; /* data buffer source address low bits */
33 u32 write_addr_lo; /* data buffer destination address low bits */
34 u32 len; /* the number of bytes to transfer
35 * per descriptor
36 */
37 u32 burst_seq_num; /* bit 31:24 write burst
38 * bit 23:16 read burst
39 * bit 15:0 sequence number
40 */
41 u32 stride; /* bit 31:16 write stride
42 * bit 15:0 read stride
43 */
44 u32 read_addr_hi; /* data buffer source address high bits */
45 u32 write_addr_hi; /* data buffer destination address high bits */
46 u32 control; /* characteristics of the transfer */
47};
48
49/* mSGDMA descriptor control field bit definitions
50 */
51#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
52#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
53#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
54#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
55#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
56#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
57#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
58#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
59#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
60#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16)
61#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)
62/* Writing ‘1’ to the ‘go’ bit commits the entire descriptor into the
63 * descriptor FIFO(s)
64 */
65#define MSGDMA_DESC_CTL_GO BIT(31)
66
67/* Tx buffer control flags
68 */
69#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
70 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
71 MSGDMA_DESC_CTL_GO)
72
73#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
74 MSGDMA_DESC_CTL_GO)
75
76#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
77 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
78 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
79 MSGDMA_DESC_CTL_GO)
80
81#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
82 MSGDMA_DESC_CTL_GEN_EOP | \
83 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
84 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
85 MSGDMA_DESC_CTL_GO)
86
87#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
88 MSGDMA_DESC_CTL_END_ON_LEN | \
89 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
90 MSGDMA_DESC_CTL_EARLY_IRQ | \
91 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
92 MSGDMA_DESC_CTL_GO)
93
94/* mSGDMA extended descriptor stride definitions
95 */
96#define MSGDMA_DESC_TX_STRIDE (0x00010001)
97#define MSGDMA_DESC_RX_STRIDE (0x00010001)
98
99/* mSGDMA dispatcher control and status register map
100 */
101struct msgdma_csr {
102 u32 status; /* Read/Clear */
103 u32 control; /* Read/Write */
104 u32 rw_fill_level; /* bit 31:16 - write fill level
105 * bit 15:0 - read fill level
106 */
107 u32 resp_fill_level; /* bit 15:0 */
108 u32 rw_seq_num; /* bit 31:16 - write sequence number
109 * bit 15:0 - read sequence number
110 */
111 u32 pad[3]; /* reserved */
112};
113
114/* mSGDMA CSR status register bit definitions
115 */
116#define MSGDMA_CSR_STAT_BUSY BIT(0)
117#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
118#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
119#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
120#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
121#define MSGDMA_CSR_STAT_STOPPED BIT(5)
122#define MSGDMA_CSR_STAT_RESETTING BIT(6)
123#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
124#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
125#define MSGDMA_CSR_STAT_IRQ BIT(9)
126#define MSGDMA_CSR_STAT_MASK 0x3FF
127#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ 0x1FF
128
129#define MSGDMA_CSR_STAT_BUSY_GET(v) GET_BIT_VALUE(v, 0)
130#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 1)
131#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v) GET_BIT_VALUE(v, 2)
132#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 3)
133#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v) GET_BIT_VALUE(v, 4)
134#define MSGDMA_CSR_STAT_STOPPED_GET(v) GET_BIT_VALUE(v, 5)
135#define MSGDMA_CSR_STAT_RESETTING_GET(v) GET_BIT_VALUE(v, 6)
136#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v) GET_BIT_VALUE(v, 7)
137#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v) GET_BIT_VALUE(v, 8)
138#define MSGDMA_CSR_STAT_IRQ_GET(v) GET_BIT_VALUE(v, 9)
139
140/* mSGDMA CSR control register bit definitions
141 */
142#define MSGDMA_CSR_CTL_STOP BIT(0)
143#define MSGDMA_CSR_CTL_RESET BIT(1)
144#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
145#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
146#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
147#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
148
149/* mSGDMA CSR fill level bits
150 */
151#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
152#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
153#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
154
155/* mSGDMA response register map
156 */
157struct msgdma_response {
158 u32 bytes_transferred;
159 u32 status;
160};
161
162/* mSGDMA response register bit definitions
163 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8)
165#define MSGDMA_RESP_ERR_MASK 0xFF
166
167#endif /* __ALTERA_MSGDMA_H__*/
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644
index 000000000000..0ee96639ae44
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -0,0 +1,509 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/list.h>
18#include "altera_utils.h"
19#include "altera_tse.h"
20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h"
22
23static void sgdma_descrip(struct sgdma_descrip *desc,
24 struct sgdma_descrip *ndesc,
25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr,
27 dma_addr_t waddr,
28 u16 length,
29 int generate_eop,
30 int rfixed,
31 int wfixed);
32
33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc);
35
36static int sgdma_async_read(struct altera_tse_private *priv);
37
38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc);
41
42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc);
45
46static int sgdma_txbusy(struct altera_tse_private *priv);
47
48static int sgdma_rxbusy(struct altera_tse_private *priv);
49
50static void
51queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);
52
53static void
54queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);
55
56static struct tse_buffer *
57dequeue_tx(struct altera_tse_private *priv);
58
59static struct tse_buffer *
60dequeue_rx(struct altera_tse_private *priv);
61
62static struct tse_buffer *
63queue_rx_peekhead(struct altera_tse_private *priv);
64
65int sgdma_initialize(struct altera_tse_private *priv)
66{
67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
68
69 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
70 SGDMA_CTRLREG_ILASTD;
71
72 INIT_LIST_HEAD(&priv->txlisthd);
73 INIT_LIST_HEAD(&priv->rxlisthd);
74
75 priv->rxdescphys = (dma_addr_t) 0;
76 priv->txdescphys = (dma_addr_t) 0;
77
78 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
79 priv->rxdescmem, DMA_BIDIRECTIONAL);
80
81 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
82 sgdma_uninitialize(priv);
83 netdev_err(priv->dev, "error mapping rx descriptor memory\n");
84 return -EINVAL;
85 }
86
87 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
88 priv->txdescmem, DMA_TO_DEVICE);
89
90 if (dma_mapping_error(priv->device, priv->txdescphys)) {
91 sgdma_uninitialize(priv);
92 netdev_err(priv->dev, "error mapping tx descriptor memory\n");
93 return -EINVAL;
94 }
95
96 return 0;
97}
98
99void sgdma_uninitialize(struct altera_tse_private *priv)
100{
101 if (priv->rxdescphys)
102 dma_unmap_single(priv->device, priv->rxdescphys,
103 priv->rxdescmem, DMA_BIDIRECTIONAL);
104
105 if (priv->txdescphys)
106 dma_unmap_single(priv->device, priv->txdescphys,
107 priv->txdescmem, DMA_TO_DEVICE);
108}
109
110/* This function resets the SGDMA controller and clears the
111 * descriptor memory used for transmits and receives.
112 */
113void sgdma_reset(struct altera_tse_private *priv)
114{
115 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
116 u32 txdescriplen = priv->txdescmem;
117 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
118 u32 rxdescriplen = priv->rxdescmem;
119 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
120 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
121
122 /* Initialize descriptor memory to 0 */
123 memset(ptxdescripmem, 0, txdescriplen);
124 memset(prxdescripmem, 0, rxdescriplen);
125
126 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
127 iowrite32(0, &ptxsgdma->control);
128
129 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
130 iowrite32(0, &prxsgdma->control);
131}
132
133void sgdma_enable_rxirq(struct altera_tse_private *priv)
134{
135 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
136 priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
137 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
138}
139
140void sgdma_enable_txirq(struct altera_tse_private *priv)
141{
142 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
143 priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
144 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
145}
146
147/* for SGDMA, RX interrupts remain enabled after enabling */
148void sgdma_disable_rxirq(struct altera_tse_private *priv)
149{
150}
151
152/* for SGDMA, TX interrupts remain enabled after enabling */
153void sgdma_disable_txirq(struct altera_tse_private *priv)
154{
155}
156
157void sgdma_clear_rxirq(struct altera_tse_private *priv)
158{
159 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
160 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
161}
162
163void sgdma_clear_txirq(struct altera_tse_private *priv)
164{
165 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
166 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
167}
168
169/* transmits buffer through SGDMA. Returns number of buffers
170 * transmitted, 0 if not possible.
171 *
172 * tx_lock is held by the caller
173 */
174int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
175{
176 int pktstx = 0;
177 struct sgdma_descrip *descbase =
178 (struct sgdma_descrip *)priv->tx_dma_desc;
179
180 struct sgdma_descrip *cdesc = &descbase[0];
181 struct sgdma_descrip *ndesc = &descbase[1];
182
183 /* wait 'til the tx sgdma is ready for the next transmit request */
184 if (sgdma_txbusy(priv))
185 return 0;
186
187 sgdma_descrip(cdesc, /* current descriptor */
188 ndesc, /* next descriptor */
189 sgdma_txphysaddr(priv, ndesc),
190 buffer->dma_addr, /* address of packet to xmit */
191 0, /* write addr 0 for tx dma */
192 buffer->len, /* length of packet */
193 SGDMA_CONTROL_EOP, /* Generate EOP */
194 0, /* read fixed */
195 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
196
197 pktstx = sgdma_async_write(priv, cdesc);
198
199 /* enqueue the request to the pending transmit queue */
200 queue_tx(priv, buffer);
201
202 return 1;
203}
204
205
206/* tx_lock held to protect access to queued tx list
207 */
208u32 sgdma_tx_completions(struct altera_tse_private *priv)
209{
210 u32 ready = 0;
211 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
212
213 if (!sgdma_txbusy(priv) &&
214 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
215 (dequeue_tx(priv))) {
216 ready = 1;
217 }
218
219 return ready;
220}
221
222int sgdma_add_rx_desc(struct altera_tse_private *priv,
223 struct tse_buffer *rxbuffer)
224{
225 queue_rx(priv, rxbuffer);
226 return sgdma_async_read(priv);
227}
228
229/* status is returned on upper 16 bits,
230 * length is returned in lower 16 bits
231 */
232u32 sgdma_rx_status(struct altera_tse_private *priv)
233{
234 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
235 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
236 struct sgdma_descrip *desc = NULL;
237 int pktsrx;
238 unsigned int rxstatus = 0;
239 unsigned int pktlength = 0;
240 unsigned int pktstatus = 0;
241 struct tse_buffer *rxbuffer = NULL;
242
243 dma_sync_single_for_cpu(priv->device,
244 priv->rxdescphys,
245 priv->rxdescmem,
246 DMA_BIDIRECTIONAL);
247
248 desc = &base[0];
249 if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
250 (desc->status & SGDMA_STATUS_EOP)) {
251 pktlength = desc->bytes_xferred;
252 pktstatus = desc->status & 0x3f;
253 rxstatus = pktstatus;
254 rxstatus = rxstatus << 16;
255 rxstatus |= (pktlength & 0xffff);
256
257 desc->status = 0;
258
259 rxbuffer = dequeue_rx(priv);
260 if (rxbuffer == NULL)
261 netdev_err(priv->dev,
262 "sgdma rx and rx queue empty!\n");
263
264 /* kick the rx sgdma after reaping this descriptor */
265 pktsrx = sgdma_async_read(priv);
266 }
267
268 return rxstatus;
269}
270
271
272/* Private functions */
273static void sgdma_descrip(struct sgdma_descrip *desc,
274 struct sgdma_descrip *ndesc,
275 dma_addr_t ndesc_phys,
276 dma_addr_t raddr,
277 dma_addr_t waddr,
278 u16 length,
279 int generate_eop,
280 int rfixed,
281 int wfixed)
282{
283 /* Clear the next descriptor as not owned by hardware */
284 u32 ctrl = ndesc->control;
285 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
286 ndesc->control = ctrl;
287
288 ctrl = 0;
289 ctrl = SGDMA_CONTROL_HW_OWNED;
290 ctrl |= generate_eop;
291 ctrl |= rfixed;
292 ctrl |= wfixed;
293
294 /* Channel is implicitly zero, initialized to 0 by default */
295
296 desc->raddr = raddr;
297 desc->waddr = waddr;
298 desc->next = lower_32_bits(ndesc_phys);
299 desc->control = ctrl;
300 desc->status = 0;
301 desc->rburst = 0;
302 desc->wburst = 0;
303 desc->bytes = length;
304 desc->bytes_xferred = 0;
305}
306
307/* If hardware is busy, don't restart async read.
308 * if status register is 0 - meaning initial state, restart async read,
309 * probably for the first time when populating a receive buffer.
310 * If read status indicate not busy and a status, restart the async
311 * DMA read.
312 */
313static int sgdma_async_read(struct altera_tse_private *priv)
314{
315 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
316 struct sgdma_descrip *descbase =
317 (struct sgdma_descrip *)priv->rx_dma_desc;
318
319 struct sgdma_descrip *cdesc = &descbase[0];
320 struct sgdma_descrip *ndesc = &descbase[1];
321
322 unsigned int sts = ioread32(&csr->status);
323 struct tse_buffer *rxbuffer = NULL;
324
325 if (!sgdma_rxbusy(priv)) {
326 rxbuffer = queue_rx_peekhead(priv);
327 if (rxbuffer == NULL)
328 return 0;
329
330 sgdma_descrip(cdesc, /* current descriptor */
331 ndesc, /* next descriptor */
332 sgdma_rxphysaddr(priv, ndesc),
333 0, /* read addr 0 for rx dma */
334 rxbuffer->dma_addr, /* write addr for rx dma */
335 0, /* read 'til EOP */
336 0, /* EOP: NA for rx dma */
337 0, /* read fixed: NA for rx dma */
338 0); /* SOP: NA for rx DMA */
339
340 /* clear control and status */
341 iowrite32(0, &csr->control);
342
343 /* If status available, clear those bits */
344 if (sts & 0xf)
345 iowrite32(0xf, &csr->status);
346
347 dma_sync_single_for_device(priv->device,
348 priv->rxdescphys,
349 priv->rxdescmem,
350 DMA_BIDIRECTIONAL);
351
352 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
353 &csr->next_descrip);
354
355 iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
356 &csr->control);
357
358 return 1;
359 }
360
361 return 0;
362}
363
364static int sgdma_async_write(struct altera_tse_private *priv,
365 struct sgdma_descrip *desc)
366{
367 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
368
369 if (sgdma_txbusy(priv))
370 return 0;
371
372 /* clear control and status */
373 iowrite32(0, &csr->control);
374 iowrite32(0x1f, &csr->status);
375
376 dma_sync_single_for_device(priv->device, priv->txdescphys,
377 priv->txdescmem, DMA_TO_DEVICE);
378
379 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
380 &csr->next_descrip);
381
382 iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
383 &csr->control);
384
385 return 1;
386}
387
388static dma_addr_t
389sgdma_txphysaddr(struct altera_tse_private *priv,
390 struct sgdma_descrip *desc)
391{
392 dma_addr_t paddr = priv->txdescmem_busaddr;
393 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
394 return (dma_addr_t)((uintptr_t)paddr + offs);
395}
396
397static dma_addr_t
398sgdma_rxphysaddr(struct altera_tse_private *priv,
399 struct sgdma_descrip *desc)
400{
401 dma_addr_t paddr = priv->rxdescmem_busaddr;
402 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
403 return (dma_addr_t)((uintptr_t)paddr + offs);
404}
405
406#define list_remove_head(list, entry, type, member) \
407 do { \
408 entry = NULL; \
409 if (!list_empty(list)) { \
410 entry = list_entry((list)->next, type, member); \
411 list_del_init(&entry->member); \
412 } \
413 } while (0)
414
415#define list_peek_head(list, entry, type, member) \
416 do { \
417 entry = NULL; \
418 if (!list_empty(list)) { \
419 entry = list_entry((list)->next, type, member); \
420 } \
421 } while (0)
422
423/* adds a tse_buffer to the tail of a tx buffer list.
424 * assumes the caller is managing and holding a mutual exclusion
425 * primitive to avoid simultaneous pushes/pops to the list.
426 */
427static void
428queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
429{
430 list_add_tail(&buffer->lh, &priv->txlisthd);
431}
432
433
434/* adds a tse_buffer to the tail of a rx buffer list
435 * assumes the caller is managing and holding a mutual exclusion
436 * primitive to avoid simultaneous pushes/pops to the list.
437 */
438static void
439queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
440{
441 list_add_tail(&buffer->lh, &priv->rxlisthd);
442}
443
444/* dequeues a tse_buffer from the transmit buffer list, otherwise
445 * returns NULL if empty.
446 * assumes the caller is managing and holding a mutual exclusion
447 * primitive to avoid simultaneous pushes/pops to the list.
448 */
449static struct tse_buffer *
450dequeue_tx(struct altera_tse_private *priv)
451{
452 struct tse_buffer *buffer = NULL;
453 list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
454 return buffer;
455}
456
457/* dequeues a tse_buffer from the receive buffer list, otherwise
458 * returns NULL if empty
459 * assumes the caller is managing and holding a mutual exclusion
460 * primitive to avoid simultaneous pushes/pops to the list.
461 */
462static struct tse_buffer *
463dequeue_rx(struct altera_tse_private *priv)
464{
465 struct tse_buffer *buffer = NULL;
466 list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
467 return buffer;
468}
469
470/* dequeues a tse_buffer from the receive buffer list, otherwise
471 * returns NULL if empty
472 * assumes the caller is managing and holding a mutual exclusion
473 * primitive to avoid simultaneous pushes/pops to the list while the
474 * head is being examined.
475 */
476static struct tse_buffer *
477queue_rx_peekhead(struct altera_tse_private *priv)
478{
479 struct tse_buffer *buffer = NULL;
480 list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
481 return buffer;
482}
483
484/* check and return rx sgdma status without polling
485 */
486static int sgdma_rxbusy(struct altera_tse_private *priv)
487{
488 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
489 return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
490}
491
492/* waits for the tx sgdma to finish it's current operation, returns 0
493 * when it transitions to nonbusy, returns 1 if the operation times out
494 */
495static int sgdma_txbusy(struct altera_tse_private *priv)
496{
497 int delay = 0;
498 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
499
500 /* if DMA is busy, wait for current transactino to finish */
501 while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
502 udelay(1);
503
504 if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
505 netdev_err(priv->dev, "timeout waiting for tx dma\n");
506 return 1;
507 }
508 return 0;
509}
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644
index 000000000000..07d471729dc4
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -0,0 +1,35 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
#ifndef __ALTERA_SGDMA_H__
#define __ALTERA_SGDMA_H__

/* SGDMA backend entry points. Their signatures mirror the callback
 * slots of struct altera_dmaops (altera_tse.h), so the core driver can
 * drive either the SGDMA or the MSGDMA backend through one interface.
 */
void sgdma_reset(struct altera_tse_private *);
void sgdma_enable_txirq(struct altera_tse_private *);
void sgdma_enable_rxirq(struct altera_tse_private *);
void sgdma_disable_rxirq(struct altera_tse_private *);
void sgdma_disable_txirq(struct altera_tse_private *);
void sgdma_clear_rxirq(struct altera_tse_private *);
void sgdma_clear_txirq(struct altera_tse_private *);
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
u32 sgdma_tx_completions(struct altera_tse_private *);
int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
void sgdma_status(struct altera_tse_private *);
u32 sgdma_rx_status(struct altera_tse_private *);
int sgdma_initialize(struct altera_tse_private *);
void sgdma_uninitialize(struct altera_tse_private *);

#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644
index 000000000000..ba3334f35383
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -0,0 +1,124 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_SGDMAHW_H__
18#define __ALTERA_SGDMAHW_H__
19
/* SGDMA descriptor structure.
 * The layout must match the hardware descriptor format field for
 * field; __packed keeps the compiler from inserting padding between
 * the sub-word members (bytes through control).
 */
struct sgdma_descrip {
        unsigned int raddr; /* address of data to be read */
        unsigned int pad1;
        unsigned int waddr;
        unsigned int pad2;
        unsigned int next;
        unsigned int pad3;
        unsigned short bytes;
        unsigned char rburst;
        unsigned char wburst;
        unsigned short bytes_xferred;   /* 16 bits, bytes xferred */

        /* bit 0: error
         * bit 1: length error
         * bit 2: crc error
         * bit 3: truncated error
         * bit 4: phy error
         * bit 5: collision error
         * bit 6: reserved
         * bit 7: status eop for recv case
         */
        unsigned char status;

        /* bit 0: eop
         * bit 1: read_fixed
         * bit 2: write fixed
         * bits 3,4,5,6: Channel (always 0)
         * bit 7: hardware owned
         */
        unsigned char control;
} __packed;
52
53
54#define SGDMA_STATUS_ERR BIT(0)
55#define SGDMA_STATUS_LENGTH_ERR BIT(1)
56#define SGDMA_STATUS_CRC_ERR BIT(2)
57#define SGDMA_STATUS_TRUNC_ERR BIT(3)
58#define SGDMA_STATUS_PHY_ERR BIT(4)
59#define SGDMA_STATUS_COLL_ERR BIT(5)
60#define SGDMA_STATUS_EOP BIT(7)
61
62#define SGDMA_CONTROL_EOP BIT(0)
63#define SGDMA_CONTROL_RD_FIXED BIT(1)
64#define SGDMA_CONTROL_WR_FIXED BIT(2)
65
66/* Channel is always 0, so just zero initialize it */
67
68#define SGDMA_CONTROL_HW_OWNED BIT(7)
69
70/* SGDMA register space */
71struct sgdma_csr {
72 /* bit 0: error
73 * bit 1: eop
74 * bit 2: descriptor completed
75 * bit 3: chain completed
76 * bit 4: busy
77 * remainder reserved
78 */
79 u32 status;
80 u32 pad1[3];
81
82 /* bit 0: interrupt on error
83 * bit 1: interrupt on eop
84 * bit 2: interrupt after every descriptor
85 * bit 3: interrupt after last descrip in a chain
86 * bit 4: global interrupt enable
87 * bit 5: starts descriptor processing
88 * bit 6: stop core on dma error
89 * bit 7: interrupt on max descriptors
90 * bits 8-15: max descriptors to generate interrupt
91 * bit 16: Software reset
92 * bit 17: clears owned by hardware if 0, does not clear otherwise
93 * bit 18: enables descriptor polling mode
94 * bit 19-26: clocks before polling again
95 * bit 27-30: reserved
96 * bit 31: clear interrupt
97 */
98 u32 control;
99 u32 pad2[3];
100 u32 next_descrip;
101 u32 pad3[3];
102};
103
104
105#define SGDMA_STSREG_ERR BIT(0) /* Error */
106#define SGDMA_STSREG_EOP BIT(1) /* EOP */
107#define SGDMA_STSREG_DESCRIP BIT(2) /* Descriptor completed */
108#define SGDMA_STSREG_CHAIN BIT(3) /* Chain completed */
109#define SGDMA_STSREG_BUSY BIT(4) /* Controller busy */
110
111#define SGDMA_CTRLREG_IOE BIT(0) /* Interrupt on error */
112#define SGDMA_CTRLREG_IOEOP BIT(1) /* Interrupt on EOP */
113#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */
114#define SGDMA_CTRLREG_ILASTD BIT(3) /* Interrupt after last descriptor */
115#define SGDMA_CTRLREG_INTEN BIT(4) /* Global Interrupt enable */
116#define SGDMA_CTRLREG_START BIT(5) /* starts descriptor processing */
117#define SGDMA_CTRLREG_STOPERR BIT(6) /* stop on dma error */
118#define SGDMA_CTRLREG_INTMAX BIT(7) /* Interrupt on max descriptors */
119#define SGDMA_CTRLREG_RESET BIT(16)/* Software reset */
120#define SGDMA_CTRLREG_COBHW BIT(17)/* Clears owned by hardware */
121#define SGDMA_CTRLREG_POLL BIT(18)/* enables descriptor polling mode */
122#define SGDMA_CTRLREG_CLRINT BIT(31)/* Clears interrupt */
123
124#endif /* __ALTERA_SGDMAHW_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
new file mode 100644
index 000000000000..8feeed05de0e
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -0,0 +1,486 @@
1/* Altera Triple-Speed Ethernet MAC driver
2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
3 *
4 * Contributors:
5 * Dalon Westergreen
6 * Thomas Chou
7 * Ian Abbott
8 * Yuriy Kozlov
9 * Tobias Klauser
10 * Andriy Smolskyy
11 * Roman Bulgakov
12 * Dmytro Mytarchuk
13 * Matthew Gerlach
14 *
15 * Original driver contributed by SLS.
16 * Major updates contributed by GlobalLogic
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms and conditions of the GNU General Public License,
20 * version 2, as published by the Free Software Foundation.
21 *
22 * This program is distributed in the hope it will be useful, but WITHOUT
23 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25 * more details.
26 *
27 * You should have received a copy of the GNU General Public License along with
28 * this program. If not, see <http://www.gnu.org/licenses/>.
29 */
30
31#ifndef __ALTERA_TSE_H__
32#define __ALTERA_TSE_H__
33
34#define ALTERA_TSE_RESOURCE_NAME "altera_tse"
35
36#include <linux/bitops.h>
37#include <linux/if_vlan.h>
38#include <linux/list.h>
39#include <linux/netdevice.h>
40#include <linux/phy.h>
41
42#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
43#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
44 * bytes
45 */
46/* Rx FIFO default settings */
47#define ALTERA_TSE_RX_SECTION_EMPTY 16
48#define ALTERA_TSE_RX_SECTION_FULL 0
49#define ALTERA_TSE_RX_ALMOST_EMPTY 8
50#define ALTERA_TSE_RX_ALMOST_FULL 8
51
52/* Tx FIFO default settings */
53#define ALTERA_TSE_TX_SECTION_EMPTY 16
54#define ALTERA_TSE_TX_SECTION_FULL 0
55#define ALTERA_TSE_TX_ALMOST_EMPTY 8
56#define ALTERA_TSE_TX_ALMOST_FULL 3
57
58/* MAC function configuration default settings */
59#define ALTERA_TSE_TX_IPG_LENGTH 12
60
61#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
62
63/* MAC Command_Config Register Bit Definitions
64 */
65#define MAC_CMDCFG_TX_ENA BIT(0)
66#define MAC_CMDCFG_RX_ENA BIT(1)
67#define MAC_CMDCFG_XON_GEN BIT(2)
68#define MAC_CMDCFG_ETH_SPEED BIT(3)
69#define MAC_CMDCFG_PROMIS_EN BIT(4)
70#define MAC_CMDCFG_PAD_EN BIT(5)
71#define MAC_CMDCFG_CRC_FWD BIT(6)
72#define MAC_CMDCFG_PAUSE_FWD BIT(7)
73#define MAC_CMDCFG_PAUSE_IGNORE BIT(8)
74#define MAC_CMDCFG_TX_ADDR_INS BIT(9)
75#define MAC_CMDCFG_HD_ENA BIT(10)
76#define MAC_CMDCFG_EXCESS_COL BIT(11)
77#define MAC_CMDCFG_LATE_COL BIT(12)
78#define MAC_CMDCFG_SW_RESET BIT(13)
79#define MAC_CMDCFG_MHASH_SEL BIT(14)
80#define MAC_CMDCFG_LOOP_ENA BIT(15)
81#define MAC_CMDCFG_TX_ADDR_SEL(v) (((v) & 0x7) << 16)
82#define MAC_CMDCFG_MAGIC_ENA BIT(19)
83#define MAC_CMDCFG_SLEEP BIT(20)
84#define MAC_CMDCFG_WAKEUP BIT(21)
85#define MAC_CMDCFG_XOFF_GEN BIT(22)
86#define MAC_CMDCFG_CNTL_FRM_ENA BIT(23)
87#define MAC_CMDCFG_NO_LGTH_CHECK BIT(24)
88#define MAC_CMDCFG_ENA_10 BIT(25)
89#define MAC_CMDCFG_RX_ERR_DISC BIT(26)
90#define MAC_CMDCFG_DISABLE_READ_TIMEOUT BIT(27)
91#define MAC_CMDCFG_CNT_RESET BIT(31)
92
93#define MAC_CMDCFG_TX_ENA_GET(v) GET_BIT_VALUE(v, 0)
94#define MAC_CMDCFG_RX_ENA_GET(v) GET_BIT_VALUE(v, 1)
95#define MAC_CMDCFG_XON_GEN_GET(v) GET_BIT_VALUE(v, 2)
96#define MAC_CMDCFG_ETH_SPEED_GET(v) GET_BIT_VALUE(v, 3)
97#define MAC_CMDCFG_PROMIS_EN_GET(v) GET_BIT_VALUE(v, 4)
98#define MAC_CMDCFG_PAD_EN_GET(v) GET_BIT_VALUE(v, 5)
99#define MAC_CMDCFG_CRC_FWD_GET(v) GET_BIT_VALUE(v, 6)
100#define MAC_CMDCFG_PAUSE_FWD_GET(v) GET_BIT_VALUE(v, 7)
101#define MAC_CMDCFG_PAUSE_IGNORE_GET(v) GET_BIT_VALUE(v, 8)
102#define MAC_CMDCFG_TX_ADDR_INS_GET(v) GET_BIT_VALUE(v, 9)
103#define MAC_CMDCFG_HD_ENA_GET(v) GET_BIT_VALUE(v, 10)
104#define MAC_CMDCFG_EXCESS_COL_GET(v) GET_BIT_VALUE(v, 11)
105#define MAC_CMDCFG_LATE_COL_GET(v) GET_BIT_VALUE(v, 12)
106#define MAC_CMDCFG_SW_RESET_GET(v) GET_BIT_VALUE(v, 13)
107#define MAC_CMDCFG_MHASH_SEL_GET(v) GET_BIT_VALUE(v, 14)
108#define MAC_CMDCFG_LOOP_ENA_GET(v) GET_BIT_VALUE(v, 15)
109#define MAC_CMDCFG_TX_ADDR_SEL_GET(v) (((v) >> 16) & 0x7)
110#define MAC_CMDCFG_MAGIC_ENA_GET(v) GET_BIT_VALUE(v, 19)
111#define MAC_CMDCFG_SLEEP_GET(v) GET_BIT_VALUE(v, 20)
112#define MAC_CMDCFG_WAKEUP_GET(v) GET_BIT_VALUE(v, 21)
113#define MAC_CMDCFG_XOFF_GEN_GET(v) GET_BIT_VALUE(v, 22)
114#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v) GET_BIT_VALUE(v, 23)
115#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v) GET_BIT_VALUE(v, 24)
116#define MAC_CMDCFG_ENA_10_GET(v) GET_BIT_VALUE(v, 25)
117#define MAC_CMDCFG_RX_ERR_DISC_GET(v) GET_BIT_VALUE(v, 26)
118#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
119#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
120
121/* MDIO registers within MAC register Space
122 */
123struct altera_tse_mdio {
124 u32 control; /* PHY device operation control register */
125 u32 status; /* PHY device operation status register */
126 u32 phy_id1; /* Bits 31:16 of PHY identifier */
127 u32 phy_id2; /* Bits 15:0 of PHY identifier */
128 u32 auto_negotiation_advertisement; /* Auto-negotiation
129 * advertisement
130 * register
131 */
132 u32 remote_partner_base_page_ability;
133
134 u32 reg6;
135 u32 reg7;
136 u32 reg8;
137 u32 reg9;
138 u32 rega;
139 u32 regb;
140 u32 regc;
141 u32 regd;
142 u32 rege;
143 u32 regf;
144 u32 reg10;
145 u32 reg11;
146 u32 reg12;
147 u32 reg13;
148 u32 reg14;
149 u32 reg15;
150 u32 reg16;
151 u32 reg17;
152 u32 reg18;
153 u32 reg19;
154 u32 reg1a;
155 u32 reg1b;
156 u32 reg1c;
157 u32 reg1d;
158 u32 reg1e;
159 u32 reg1f;
160};
161
162/* MAC register Space. Note that some of these registers may or may not be
163 * present depending upon options chosen by the user when the core was
164 * configured and built. Please consult the Altera Triple Speed Ethernet User
165 * Guide for details.
166 */
167struct altera_tse_mac {
168 /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer
169 * specific revision
170 */
171 u32 megacore_revision;
172 /* Provides a memory location for user applications to test the device
173 * memory operation.
174 */
175 u32 scratch_pad;
176 /* The host processor uses this register to control and configure the
177 * MAC block
178 */
179 u32 command_config;
180 /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary
181 * MAC address
182 */
183 u32 mac_addr_0;
184 /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary
185 * MAC address
186 */
187 u32 mac_addr_1;
188 /* 14-bit maximum frame length. The MAC receive logic */
189 u32 frm_length;
190 /* The pause quanta is used in each pause frame sent to a remote
191 * Ethernet device, in increments of 512 Ethernet bit times
192 */
193 u32 pause_quanta;
194 /* 12-bit receive FIFO section-empty threshold */
195 u32 rx_section_empty;
196 /* 12-bit receive FIFO section-full threshold */
197 u32 rx_section_full;
198 /* 12-bit transmit FIFO section-empty threshold */
199 u32 tx_section_empty;
200 /* 12-bit transmit FIFO section-full threshold */
201 u32 tx_section_full;
202 /* 12-bit receive FIFO almost-empty threshold */
203 u32 rx_almost_empty;
204 /* 12-bit receive FIFO almost-full threshold */
205 u32 rx_almost_full;
206 /* 12-bit transmit FIFO almost-empty threshold */
207 u32 tx_almost_empty;
208 /* 12-bit transmit FIFO almost-full threshold */
209 u32 tx_almost_full;
210 /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */
211 u32 mdio_phy0_addr;
212 /* MDIO address of PHY Device 1. Bits 0 to 4 hold a 5-bit PHY address */
213 u32 mdio_phy1_addr;
214
215 /* Bit[15:0]—16-bit holdoff quanta */
216 u32 holdoff_quant;
217
218 /* only if 100/1000 BaseX PCS, reserved otherwise */
219 u32 reserved1[5];
220
221 /* Minimum IPG between consecutive transmit frame in terms of bytes */
222 u32 tx_ipg_length;
223
224 /* IEEE 802.3 oEntity Managed Object Support */
225
226 /* The MAC addresses */
227 u32 mac_id_1;
228 u32 mac_id_2;
229
230 /* Number of frames transmitted without error including pause frames */
231 u32 frames_transmitted_ok;
232 /* Number of frames received without error including pause frames */
233 u32 frames_received_ok;
234 /* Number of frames received with a CRC error */
235 u32 frames_check_sequence_errors;
236 /* Frame received with an alignment error */
237 u32 alignment_errors;
238 /* Sum of payload and padding octets of frames transmitted without
239 * error
240 */
241 u32 octets_transmitted_ok;
242 /* Sum of payload and padding octets of frames received without error */
243 u32 octets_received_ok;
244
245 /* IEEE 802.3 oPausedEntity Managed Object Support */
246
247 /* Number of transmitted pause frames */
248 u32 tx_pause_mac_ctrl_frames;
249 /* Number of Received pause frames */
250 u32 rx_pause_mac_ctrl_frames;
251
252 /* IETF MIB (MIB-II) Object Support */
253
254 /* Number of frames received with error */
255 u32 if_in_errors;
256 /* Number of frames transmitted with error */
257 u32 if_out_errors;
258 /* Number of valid received unicast frames */
259 u32 if_in_ucast_pkts;
260 /* Number of valid received multicasts frames (without pause) */
261 u32 if_in_multicast_pkts;
262 /* Number of valid received broadcast frames */
263 u32 if_in_broadcast_pkts;
264 u32 if_out_discards;
265 /* The number of valid unicast frames transmitted */
266 u32 if_out_ucast_pkts;
267 /* The number of valid multicast frames transmitted,
268 * excluding pause frames
269 */
270 u32 if_out_multicast_pkts;
271 u32 if_out_broadcast_pkts;
272
273 /* IETF RMON MIB Object Support */
274
275 /* Counts the number of dropped packets due to internal errors
276 * of the MAC client.
277 */
278 u32 ether_stats_drop_events;
279 /* Total number of bytes received. Good and bad frames. */
280 u32 ether_stats_octets;
281 /* Total number of packets received. Counts good and bad packets. */
282 u32 ether_stats_pkts;
283 /* Number of packets received with less than 64 bytes. */
284 u32 ether_stats_undersize_pkts;
285 /* The number of frames received that are longer than the
286 * value configured in the frm_length register
287 */
288 u32 ether_stats_oversize_pkts;
289 /* Number of received packet with 64 bytes */
290 u32 ether_stats_pkts_64_octets;
291 /* Frames (good and bad) with 65 to 127 bytes */
292 u32 ether_stats_pkts_65to127_octets;
293 /* Frames (good and bad) with 128 to 255 bytes */
294 u32 ether_stats_pkts_128to255_octets;
295 /* Frames (good and bad) with 256 to 511 bytes */
296 u32 ether_stats_pkts_256to511_octets;
297 /* Frames (good and bad) with 512 to 1023 bytes */
298 u32 ether_stats_pkts_512to1023_octets;
299 /* Frames (good and bad) with 1024 to 1518 bytes */
300 u32 ether_stats_pkts_1024to1518_octets;
301
302 /* Any frame length from 1519 to the maximum length configured in the
303 * frm_length register, if it is greater than 1518
304 */
305 u32 ether_stats_pkts_1519tox_octets;
306 /* Too long frames with CRC error */
307 u32 ether_stats_jabbers;
308 /* Too short frames with CRC error */
309 u32 ether_stats_fragments;
310
311 u32 reserved2;
312
313 /* FIFO control register */
314 u32 tx_cmd_stat;
315 u32 rx_cmd_stat;
316
317 /* Extended Statistics Counters */
318 u32 msb_octets_transmitted_ok;
319 u32 msb_octets_received_ok;
320 u32 msb_ether_stats_octets;
321
322 u32 reserved3;
323
324 /* Multicast address resolution table, mapped in the controller address
325 * space
326 */
327 u32 hash_table[64];
328
329 /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY
330 * management interface
331 */
332 struct altera_tse_mdio mdio_phy0;
333 struct altera_tse_mdio mdio_phy1;
334
335 /* 4 Supplemental MAC Addresses */
336 u32 supp_mac_addr_0_0;
337 u32 supp_mac_addr_0_1;
338 u32 supp_mac_addr_1_0;
339 u32 supp_mac_addr_1_1;
340 u32 supp_mac_addr_2_0;
341 u32 supp_mac_addr_2_1;
342 u32 supp_mac_addr_3_0;
343 u32 supp_mac_addr_3_1;
344
345 u32 reserved4[8];
346
347 /* IEEE 1588v2 Feature */
348 u32 tx_period;
349 u32 tx_adjust_fns;
350 u32 tx_adjust_ns;
351 u32 rx_period;
352 u32 rx_adjust_fns;
353 u32 rx_adjust_ns;
354
355 u32 reserved5[42];
356};
357
358/* Transmit and Receive Command Registers Bit Definitions
359 */
360#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
361#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18)
362#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25)
363
/* Wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct tse_buffer {
        struct list_head lh;    /* linkage on priv->txlisthd/rxlisthd */
        struct sk_buff *skb;    /* socket buffer tracked by this entry */
        dma_addr_t dma_addr;    /* DMA handle for the skb data */
        u32 len;                /* length of the mapping in bytes */
        int mapped_as_page;     /* NOTE(review): presumably nonzero when the
                                 * mapping came from dma_map_page() rather
                                 * than dma_map_single() — confirm with the
                                 * tx/rx paths.
                                 */
};
374
375struct altera_tse_private;
376
377#define ALTERA_DTYPE_SGDMA 1
378#define ALTERA_DTYPE_MSGDMA 2
379
/* standard DMA interface for SGDMA and MSGDMA.
 * Each backend provides one of these tables so the core driver can
 * drive either DMA engine through the same set of hooks.
 */
struct altera_dmaops {
        int altera_dtype;       /* ALTERA_DTYPE_SGDMA or ALTERA_DTYPE_MSGDMA */
        int dmamask;            /* NOTE(review): presumably the DMA addressing
                                 * width handed to dma_set_mask() — confirm
                                 * with the probe code.
                                 */
        void (*reset_dma)(struct altera_tse_private *);
        void (*enable_txirq)(struct altera_tse_private *);
        void (*enable_rxirq)(struct altera_tse_private *);
        void (*disable_txirq)(struct altera_tse_private *);
        void (*disable_rxirq)(struct altera_tse_private *);
        void (*clear_txirq)(struct altera_tse_private *);
        void (*clear_rxirq)(struct altera_tse_private *);
        int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
        u32 (*tx_completions)(struct altera_tse_private *);
        int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
        u32 (*get_rx_status)(struct altera_tse_private *);
        int (*init_dma)(struct altera_tse_private *);
        void (*uninit_dma)(struct altera_tse_private *);
};
398
399/* This structure is private to each device.
400 */
401struct altera_tse_private {
402 struct net_device *dev;
403 struct device *device;
404 struct napi_struct napi;
405
406 /* MAC address space */
407 struct altera_tse_mac __iomem *mac_dev;
408
409 /* TSE Revision */
410 u32 revision;
411
412 /* mSGDMA Rx Dispatcher address space */
413 void __iomem *rx_dma_csr;
414 void __iomem *rx_dma_desc;
415 void __iomem *rx_dma_resp;
416
417 /* mSGDMA Tx Dispatcher address space */
418 void __iomem *tx_dma_csr;
419 void __iomem *tx_dma_desc;
420
421 /* Rx buffers queue */
422 struct tse_buffer *rx_ring;
423 u32 rx_cons;
424 u32 rx_prod;
425 u32 rx_ring_size;
426 u32 rx_dma_buf_sz;
427
428 /* Tx ring buffer */
429 struct tse_buffer *tx_ring;
430 u32 tx_prod;
431 u32 tx_cons;
432 u32 tx_ring_size;
433
434 /* Interrupts */
435 u32 tx_irq;
436 u32 rx_irq;
437
438 /* RX/TX MAC FIFO configs */
439 u32 tx_fifo_depth;
440 u32 rx_fifo_depth;
441 u32 max_mtu;
442
443 /* Hash filter settings */
444 u32 hash_filter;
445 u32 added_unicast;
446
447 /* Descriptor memory info for managing SGDMA */
448 u32 txdescmem;
449 u32 rxdescmem;
450 dma_addr_t rxdescmem_busaddr;
451 dma_addr_t txdescmem_busaddr;
452 u32 txctrlreg;
453 u32 rxctrlreg;
454 dma_addr_t rxdescphys;
455 dma_addr_t txdescphys;
456
457 struct list_head txlisthd;
458 struct list_head rxlisthd;
459
460 /* MAC command_config register protection */
461 spinlock_t mac_cfg_lock;
462 /* Tx path protection */
463 spinlock_t tx_lock;
464 /* Rx DMA & interrupt control protection */
465 spinlock_t rxdma_irq_lock;
466
467 /* PHY */
468 int phy_addr; /* PHY's MDIO address, -1 for autodetection */
469 phy_interface_t phy_iface;
470 struct mii_bus *mdio;
471 struct phy_device *phydev;
472 int oldspeed;
473 int oldduplex;
474 int oldlink;
475
476 /* ethtool msglvl option */
477 u32 msg_enable;
478
479 struct altera_dmaops *dmaops;
480};
481
482/* Function prototypes
483 */
484void altera_tse_set_ethtool_ops(struct net_device *);
485
486#endif /* __ALTERA_TSE_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
new file mode 100644
index 000000000000..319ca74f5e74
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -0,0 +1,235 @@
1/* Ethtool support for Altera Triple-Speed Ethernet MAC driver
2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
3 *
4 * Contributors:
5 * Dalon Westergreen
6 * Thomas Chou
7 * Ian Abbott
8 * Yuriy Kozlov
9 * Tobias Klauser
10 * Andriy Smolskyy
11 * Roman Bulgakov
12 * Dmytro Mytarchuk
13 *
14 * Original driver contributed by SLS.
15 * Major updates contributed by GlobalLogic
16 *
17 * This program is free software; you can redistribute it and/or modify it
18 * under the terms and conditions of the GNU General Public License,
19 * version 2, as published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24 * more details.
25 *
26 * You should have received a copy of the GNU General Public License along with
27 * this program. If not, see <http://www.gnu.org/licenses/>.
28 */
29
30#include <linux/ethtool.h>
31#include <linux/kernel.h>
32#include <linux/netdevice.h>
33#include <linux/phy.h>
34
35#include "altera_tse.h"
36
37#define TSE_STATS_LEN 31
38#define TSE_NUM_REGS 128
39
/* Statistic names reported for ETH_SS_STATS.
 * The order here must match the buf[] indices filled in by
 * tse_fill_stats(), and the number of entries must equal
 * TSE_STATS_LEN.
 */
static char const stat_gstrings[][ETH_GSTRING_LEN] = {
        "tx_packets",
        "rx_packets",
        "rx_crc_errors",
        "rx_align_errors",
        "tx_bytes",
        "rx_bytes",
        "tx_pause",
        "rx_pause",
        "rx_errors",
        "tx_errors",
        "rx_unicast",
        "rx_multicast",
        "rx_broadcast",
        "tx_discards",
        "tx_unicast",
        "tx_multicast",
        "tx_broadcast",
        "ether_drops",
        "rx_total_bytes",
        "rx_total_packets",
        "rx_undersize",
        "rx_oversize",
        "rx_64_bytes",
        "rx_65_127_bytes",
        "rx_128_255_bytes",
        "rx_256_511_bytes",
        "rx_512_1023_bytes",
        "rx_1024_1518_bytes",
        "rx_gte_1519_bytes",
        "rx_jabbers",
        "rx_runts",
};
73
74static void tse_get_drvinfo(struct net_device *dev,
75 struct ethtool_drvinfo *info)
76{
77 struct altera_tse_private *priv = netdev_priv(dev);
78 u32 rev = ioread32(&priv->mac_dev->megacore_revision);
79
80 strcpy(info->driver, "Altera TSE MAC IP Driver");
81 strcpy(info->version, "v8.0");
82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
84 sprintf(info->bus_info, "platform");
85}
86
/* Fill in a buffer with the strings which correspond to the
 * stats. Only ETH_SS_STATS is exported (see tse_sset_count()), so
 * the stringset argument is not examined here.
 */
static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
        memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN);
}
94
/* Read the hardware statistics counters into buf[].
 * The buf[] index order must match stat_gstrings[] exactly.
 * 64-bit counters are assembled from an MSB register plus the
 * corresponding 32-bit LSB register.
 */
static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                           u64 *buf)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;

        buf[0] = ioread32(&mac->frames_transmitted_ok);
        buf[1] = ioread32(&mac->frames_received_ok);
        buf[2] = ioread32(&mac->frames_check_sequence_errors);
        buf[3] = ioread32(&mac->alignment_errors);

        /* Extended aOctetsTransmittedOK counter */
        ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
        ext |= ioread32(&mac->octets_transmitted_ok);
        buf[4] = ext;

        /* Extended aOctetsReceivedOK counter */
        ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
        ext |= ioread32(&mac->octets_received_ok);
        buf[5] = ext;

        buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
        buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
        buf[8] = ioread32(&mac->if_in_errors);
        buf[9] = ioread32(&mac->if_out_errors);
        buf[10] = ioread32(&mac->if_in_ucast_pkts);
        buf[11] = ioread32(&mac->if_in_multicast_pkts);
        buf[12] = ioread32(&mac->if_in_broadcast_pkts);
        buf[13] = ioread32(&mac->if_out_discards);
        buf[14] = ioread32(&mac->if_out_ucast_pkts);
        buf[15] = ioread32(&mac->if_out_multicast_pkts);
        buf[16] = ioread32(&mac->if_out_broadcast_pkts);
        buf[17] = ioread32(&mac->ether_stats_drop_events);

        /* Extended etherStatsOctets counter */
        ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
        ext |= ioread32(&mac->ether_stats_octets);
        buf[18] = ext;

        buf[19] = ioread32(&mac->ether_stats_pkts);
        buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
        buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
        buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
        buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
        buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
        buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
        buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
        buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
        buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
        buf[29] = ioread32(&mac->ether_stats_jabbers);
        buf[30] = ioread32(&mac->ether_stats_fragments);
}
148
149static int tse_sset_count(struct net_device *dev, int sset)
150{
151 switch (sset) {
152 case ETH_SS_STATS:
153 return TSE_STATS_LEN;
154 default:
155 return -EOPNOTSUPP;
156 }
157}
158
159static u32 tse_get_msglevel(struct net_device *dev)
160{
161 struct altera_tse_private *priv = netdev_priv(dev);
162 return priv->msg_enable;
163}
164
165static void tse_set_msglevel(struct net_device *dev, uint32_t data)
166{
167 struct altera_tse_private *priv = netdev_priv(dev);
168 priv->msg_enable = data;
169}
170
171static int tse_reglen(struct net_device *dev)
172{
173 return TSE_NUM_REGS * sizeof(u32);
174}
175
176static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
177 void *regbuf)
178{
179 int i;
180 struct altera_tse_private *priv = netdev_priv(dev);
181 u32 *tse_mac_regs = (u32 *)priv->mac_dev;
182 u32 *buf = regbuf;
183
184 /* Set version to a known value, so ethtool knows
185 * how to do any special formatting of this data.
186 * This version number will need to change if and
187 * when this register table is changed.
188 */
189
190 regs->version = 1;
191
192 for (i = 0; i < TSE_NUM_REGS; i++)
193 buf[i] = ioread32(&tse_mac_regs[i]);
194}
195
196static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
197{
198 struct altera_tse_private *priv = netdev_priv(dev);
199 struct phy_device *phydev = priv->phydev;
200
201 if (phydev == NULL)
202 return -ENODEV;
203
204 return phy_ethtool_gset(phydev, cmd);
205}
206
207static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
208{
209 struct altera_tse_private *priv = netdev_priv(dev);
210 struct phy_device *phydev = priv->phydev;
211
212 if (phydev == NULL)
213 return -ENODEV;
214
215 return phy_ethtool_sset(phydev, cmd);
216}
217
/* ethtool operations implemented by this driver; installed on the
 * net device by altera_tse_set_ethtool_ops().
 */
static const struct ethtool_ops tse_ethtool_ops = {
        .get_drvinfo = tse_get_drvinfo,
        .get_regs_len = tse_reglen,
        .get_regs = tse_get_regs,
        .get_link = ethtool_op_get_link,
        .get_settings = tse_get_settings,
        .set_settings = tse_set_settings,
        .get_strings = tse_gstrings,
        .get_sset_count = tse_sset_count,
        .get_ethtool_stats = tse_fill_stats,
        .get_msglevel = tse_get_msglevel,
        .set_msglevel = tse_set_msglevel,
};
231
/* Attach the TSE ethtool operations to the given net device. */
void altera_tse_set_ethtool_ops(struct net_device *netdev)
{
        SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
}
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
new file mode 100644
index 000000000000..c70a29e0b9f7
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -0,0 +1,1543 @@
1/* Altera Triple-Speed Ethernet MAC driver
2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
3 *
4 * Contributors:
5 * Dalon Westergreen
6 * Thomas Chou
7 * Ian Abbott
8 * Yuriy Kozlov
9 * Tobias Klauser
10 * Andriy Smolskyy
11 * Roman Bulgakov
12 * Dmytro Mytarchuk
13 * Matthew Gerlach
14 *
15 * Original driver contributed by SLS.
16 * Major updates contributed by GlobalLogic
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms and conditions of the GNU General Public License,
20 * version 2, as published by the Free Software Foundation.
21 *
22 * This program is distributed in the hope it will be useful, but WITHOUT
23 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25 * more details.
26 *
27 * You should have received a copy of the GNU General Public License along with
28 * this program. If not, see <http://www.gnu.org/licenses/>.
29 */
30
31#include <linux/atomic.h>
32#include <linux/delay.h>
33#include <linux/etherdevice.h>
34#include <linux/if_vlan.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/netdevice.h>
41#include <linux/of_device.h>
42#include <linux/of_mdio.h>
43#include <linux/of_net.h>
44#include <linux/of_platform.h>
45#include <linux/phy.h>
46#include <linux/platform_device.h>
47#include <linux/skbuff.h>
48#include <asm/cacheflush.h>
49
50#include "altera_utils.h"
51#include "altera_tse.h"
52#include "altera_sgdma.h"
53#include "altera_msgdma.h"
54
/* Per-probe instance counter; starts at ~0 so the first post-increment
 * yields 0.  NOTE(review): presumably bumped in probe to give each
 * MDIO bus a unique id - confirm against altera_tse_probe().
 */
static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;	/* -1 selects default_msg_level below */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
					NETIF_MSG_LINK | NETIF_MSG_IFUP |
					NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");


/* Sentinel phy_addr value: scan the MDIO bus instead of using a
 * fixed address (see connect_local_phy()).
 */
#define POLL_PHY (-1)

/* Make sure DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header would be additional 4 bytes and additional
 * headroom for alignment is 2 bytes, 2048 is just fine.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048

/* Allow network stack to resume queueing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)

/* Stop the TX queue when the number of free descriptors drops to
 * this level (see tse_start_xmit()).
 */
#define TXQUEUESTOP_THRESHHOLD	2

/* Forward declaration; the OF match table is defined later in the file */
static struct of_device_id altera_tse_ids[];
93
/* Number of free TX descriptors: ring size minus in-flight entries,
 * minus one so tx_prod can never catch up with tx_cons (keeps a full
 * ring distinguishable from an empty one).
 */
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
98
/* MDIO specific functions
 */

/* mii_bus read op: the PHY registers appear memory-mapped starting at
 * mac->mdio_phy0; reading slot @regnum triggers the serial MDIO read.
 * Only the low 16 bits carry register data.
 * NOTE(review): regnum is not range-checked here - presumably the MDIO
 * core only passes 0..31; confirm.
 */
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
	u32 data;

	/* set MDIO address */
	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);

	/* get the data */
	data = ioread32(&mdio_regs[regnum]) & 0xffff;
	return data;
}
114
/* mii_bus write op: counterpart of altera_tse_mdio_read(); writing the
 * memory-mapped slot @regnum starts the serial MDIO write of @value.
 */
static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 value)
{
	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;

	/* set MDIO address */
	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);

	/* write the data */
	iowrite32((u32) value, &mdio_regs[regnum]);
	return 0;
}
128
/* Create and register an MDIO bus for the "altr,tse-mdio" subnode of
 * this MAC's device-tree node, if one exists.  @id makes the bus name
 * unique per MAC instance.
 *
 * Returns 0 when no MDIO subnode is present (not an error), 0 on
 * success, or a negative errno on failure.
 */
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	int i;
	struct device_node *mdio_node = NULL;
	struct mii_bus *mdio = NULL;
	struct device_node *child_node = NULL;

	/* Look for an MDIO subnode under this MAC's OF node */
	for_each_child_of_node(priv->device->of_node, child_node) {
		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
			mdio_node = child_node;
			break;
		}
	}

	if (mdio_node) {
		netdev_dbg(dev, "FOUND MDIO subnode\n");
	} else {
		netdev_dbg(dev, "NO MDIO subnode\n");
		return 0;
	}

	mdio = mdiobus_alloc();
	if (mdio == NULL) {
		netdev_err(dev, "Error allocating MDIO bus\n");
		return -ENOMEM;
	}

	mdio->name = ALTERA_TSE_RESOURCE_NAME;
	mdio->read = &altera_tse_mdio_read;
	mdio->write = &altera_tse_mdio_write;
	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

	/* No PHY interrupt lines are wired up: poll every address */
	mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (mdio->irq == NULL) {
		ret = -ENOMEM;
		goto out_free_mdio;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdio->irq[i] = PHY_POLL;

	/* bus read/write ops reach the MAC registers via mdio->priv */
	mdio->priv = priv->mac_dev;
	mdio->parent = priv->device;

	ret = of_mdiobus_register(mdio, mdio_node);
	if (ret != 0) {
		netdev_err(dev, "Cannot register MDIO bus %s\n",
			   mdio->id);
		goto out_free_mdio_irq;
	}

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

	priv->mdio = mdio;
	return 0;
out_free_mdio_irq:
	kfree(mdio->irq);
out_free_mdio:
	mdiobus_free(mdio);
	mdio = NULL;
	return ret;
}
193
/* Unregister and free the MDIO bus created by altera_tse_mdio_create().
 * Safe to call when no bus was created (priv->mdio == NULL).
 */
static void altera_tse_mdio_destroy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->mdio == NULL)
		return;

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: removed\n",
			    priv->mdio->id);

	mdiobus_unregister(priv->mdio);
	kfree(priv->mdio->irq);
	mdiobus_free(priv->mdio);
	priv->mdio = NULL;
}
210
/* Allocate an RX skb of @len bytes and DMA-map it for device writes.
 * On success fills in rxbuffer->skb/dma_addr/len and returns 0.
 * Returns -ENOMEM on skb allocation failure, -EINVAL if the DMA
 * mapping fails (the skb is freed in that case).
 */
static int tse_init_rx_buffer(struct altera_tse_private *priv,
			      struct tse_buffer *rxbuffer, int len)
{
	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
	if (!rxbuffer->skb)
		return -ENOMEM;

	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
					    len,
					    DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(rxbuffer->skb);
		return -EINVAL;
	}
	rxbuffer->len = len;
	return 0;
}
230
/* Unmap and free one RX buffer; clears the descriptor bookkeeping so
 * the slot reads as empty.  A NULL skb means the slot is already free.
 */
static void tse_free_rx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *rxbuffer)
{
	struct sk_buff *skb = rxbuffer->skb;
	dma_addr_t dma_addr = rxbuffer->dma_addr;

	if (skb != NULL) {
		if (dma_addr)
			dma_unmap_single(priv->device, dma_addr,
					 rxbuffer->len,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rxbuffer->skb = NULL;
		rxbuffer->dma_addr = 0;
	}
}
247
/* Unmap and free Tx buffer resources.
 * Handles both linear (dma_map_single) and page-fragment
 * (dma_map_page) mappings, then releases the skb if one is attached.
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *buffer)
{
	if (buffer->dma_addr) {
		if (buffer->mapped_as_page)
			dma_unmap_page(priv->device, buffer->dma_addr,
				       buffer->len, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, buffer->dma_addr,
					 buffer->len, DMA_TO_DEVICE);
		buffer->dma_addr = 0;
	}
	if (buffer->skb) {
		dev_kfree_skb_any(buffer->skb);
		buffer->skb = NULL;
	}
}
267
/* Allocate the RX/TX descriptor bookkeeping arrays and pre-fill the
 * RX ring with mapped skbs.  TX slots start empty; producer/consumer
 * indices are reset for both rings.
 *
 * Returns 0 on success or -ENOMEM, unwinding partial allocations via
 * the goto ladder on failure.
 */
static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring: allocate and DMA-map one skb per slot */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}
311
312static void free_skbufs(struct net_device *dev)
313{
314 struct altera_tse_private *priv = netdev_priv(dev);
315 unsigned int rx_descs = priv->rx_ring_size;
316 unsigned int tx_descs = priv->tx_ring_size;
317 int i;
318
319 /* Release the DMA TX/RX socket buffers */
320 for (i = 0; i < rx_descs; i++)
321 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
322 for (i = 0; i < tx_descs; i++)
323 tse_free_tx_buffer(priv, &priv->tx_ring[i]);
324
325
326 kfree(priv->tx_ring);
327}
328
/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;

	/* rx_cons counts consumed buffers, rx_prod counts refilled
	 * ones; keep refilling until prod catches up with cons.  On an
	 * allocation failure stop early - the next refill pass retries.
	 */
	for (; priv->rx_cons - priv->rx_prod > 0;
	     priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
						 priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}
349
/* Pull out the VLAN tag and fix up the packet.
 * The MAC leaves the 802.1Q tag inline; when CTAG RX offload is
 * enabled, strip it by sliding the Ethernet header forward VLAN_HLEN
 * bytes and record the VID in the skb for the stack.
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u16 vid;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		eth_hdr = (struct ethhdr *)skb->data;
		/* dst+src MACs move over the tag; memmove handles overlap */
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}
364
365/* Receive a packet: retrieve and pass over to upper levels
366 */
367static int tse_rx(struct altera_tse_private *priv, int limit)
368{
369 unsigned int count = 0;
370 unsigned int next_entry;
371 struct sk_buff *skb;
372 unsigned int entry = priv->rx_cons % priv->rx_ring_size;
373 u32 rxstatus;
374 u16 pktlength;
375 u16 pktstatus;
376
377 while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
378 pktstatus = rxstatus >> 16;
379 pktlength = rxstatus & 0xffff;
380
381 if ((pktstatus & 0xFF) || (pktlength == 0))
382 netdev_err(priv->dev,
383 "RCV pktstatus %08X pktlength %08X\n",
384 pktstatus, pktlength);
385
386 count++;
387 next_entry = (++priv->rx_cons) % priv->rx_ring_size;
388
389 skb = priv->rx_ring[entry].skb;
390 if (unlikely(!skb)) {
391 netdev_err(priv->dev,
392 "%s: Inconsistent Rx descriptor chain\n",
393 __func__);
394 priv->dev->stats.rx_dropped++;
395 break;
396 }
397 priv->rx_ring[entry].skb = NULL;
398
399 skb_put(skb, pktlength);
400
401 /* make cache consistent with receive packet buffer */
402 dma_sync_single_for_cpu(priv->device,
403 priv->rx_ring[entry].dma_addr,
404 priv->rx_ring[entry].len,
405 DMA_FROM_DEVICE);
406
407 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
408 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
409
410 if (netif_msg_pktdata(priv)) {
411 netdev_info(priv->dev, "frame received %d bytes\n",
412 pktlength);
413 print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
414 16, 1, skb->data, pktlength, true);
415 }
416
417 tse_rx_vlan(priv->dev, skb);
418
419 skb->protocol = eth_type_trans(skb, priv->dev);
420 skb_checksum_none_assert(skb);
421
422 napi_gro_receive(&priv->napi, skb);
423
424 priv->dev->stats.rx_packets++;
425 priv->dev->stats.rx_bytes += pktlength;
426
427 entry = next_entry;
428 }
429
430 tse_rx_refill(priv);
431 return count;
432}
433
/* Reclaim resources after transmission completes.
 * Walks the TX ring from tx_cons, freeing one buffer per completion
 * reported by the DMA engine, and wakes the queue once enough slots
 * (TSE_TX_THRESH) are free again.  Returns the number of reclaimed
 * descriptors.  Runs under priv->tx_lock to exclude tse_start_xmit().
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
	unsigned int txsize = priv->tx_ring_size;
	u32 ready;
	unsigned int entry;
	struct tse_buffer *tx_buff;
	int txcomplete = 0;

	spin_lock(&priv->tx_lock);

	/* number of descriptors the hardware has finished with */
	ready = priv->dmaops->tx_completions(priv);

	/* Free sent buffers */
	while (ready && (priv->tx_cons != priv->tx_prod)) {
		entry = priv->tx_cons % txsize;
		tx_buff = &priv->tx_ring[entry];

		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
				   __func__, priv->tx_prod, priv->tx_cons);

		if (likely(tx_buff->skb))
			priv->dev->stats.tx_packets++;

		tse_free_tx_buffer(priv, tx_buff);
		priv->tx_cons++;

		txcomplete++;
		ready--;
	}

	/* Double-checked under the tx queue lock to avoid racing a
	 * concurrent stop in tse_start_xmit().
	 */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				netdev_dbg(priv->dev, "%s: restart transmit\n",
					   __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	spin_unlock(&priv->tx_lock);
	return txcomplete;
}
483
484/* NAPI polling function
485 */
486static int tse_poll(struct napi_struct *napi, int budget)
487{
488 struct altera_tse_private *priv =
489 container_of(napi, struct altera_tse_private, napi);
490 int rxcomplete = 0;
491 int txcomplete = 0;
492 unsigned long int flags;
493
494 txcomplete = tse_tx_complete(priv);
495
496 rxcomplete = tse_rx(priv, budget);
497
498 if (rxcomplete >= budget || txcomplete > 0)
499 return rxcomplete;
500
501 napi_gro_flush(napi, false);
502 __napi_complete(napi);
503
504 netdev_dbg(priv->dev,
505 "NAPI Complete, did %d packets with budget %d\n",
506 txcomplete+rxcomplete, budget);
507
508 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
509 priv->dmaops->enable_rxirq(priv);
510 priv->dmaops->enable_txirq(priv);
511 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
512 return rxcomplete + txcomplete;
513}
514
/* DMA TX & RX FIFO interrupt routing.
 * Shared handler for both the RX and TX DMA interrupt lines: masks
 * the DMA interrupts, schedules NAPI, and acks the IRQ sources.
 * Interrupts stay masked until tse_poll() re-enables them.
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct altera_tse_private *priv;
	unsigned long int flags;


	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}
	priv = netdev_priv(dev);

	/* turn off desc irqs and enable napi rx */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);

	/* only mask when NAPI is not already scheduled/running */
	if (likely(napi_schedule_prep(&priv->napi))) {
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		__napi_schedule(&priv->napi);
	}

	/* reset IRQs */
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	return IRQ_HANDLED;
}
547
548/* Transmit a packet (called by the kernel). Dispatches
549 * either the SGDMA method for transmitting or the
550 * MSGDMA method, assumes no scatter/gather support,
551 * implying an assumption that there's only one
552 * physically contiguous fragment starting at
553 * skb->data, for length of skb_headlen(skb).
554 */
555static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
556{
557 struct altera_tse_private *priv = netdev_priv(dev);
558 unsigned int txsize = priv->tx_ring_size;
559 unsigned int entry;
560 struct tse_buffer *buffer = NULL;
561 int nfrags = skb_shinfo(skb)->nr_frags;
562 unsigned int nopaged_len = skb_headlen(skb);
563 enum netdev_tx ret = NETDEV_TX_OK;
564 dma_addr_t dma_addr;
565 int txcomplete = 0;
566
567 spin_lock_bh(&priv->tx_lock);
568
569 if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
570 if (!netif_queue_stopped(dev)) {
571 netif_stop_queue(dev);
572 /* This is a hard error, log it. */
573 netdev_err(priv->dev,
574 "%s: Tx list full when queue awake\n",
575 __func__);
576 }
577 ret = NETDEV_TX_BUSY;
578 goto out;
579 }
580
581 /* Map the first skb fragment */
582 entry = priv->tx_prod % txsize;
583 buffer = &priv->tx_ring[entry];
584
585 dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
586 DMA_TO_DEVICE);
587 if (dma_mapping_error(priv->device, dma_addr)) {
588 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
589 ret = NETDEV_TX_OK;
590 goto out;
591 }
592
593 buffer->skb = skb;
594 buffer->dma_addr = dma_addr;
595 buffer->len = nopaged_len;
596
597 /* Push data out of the cache hierarchy into main memory */
598 dma_sync_single_for_device(priv->device, buffer->dma_addr,
599 buffer->len, DMA_TO_DEVICE);
600
601 txcomplete = priv->dmaops->tx_buffer(priv, buffer);
602
603 skb_tx_timestamp(skb);
604
605 priv->tx_prod++;
606 dev->stats.tx_bytes += skb->len;
607
608 if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
609 if (netif_msg_hw(priv))
610 netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
611 __func__);
612 netif_stop_queue(dev);
613 }
614
615out:
616 spin_unlock_bh(&priv->tx_lock);
617
618 return ret;
619}
620
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void altera_tse_adjust_link(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	/* only change config if there is a link */
	spin_lock(&priv->mac_cfg_lock);
	if (phydev->link) {
		/* Read old config */
		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);

		/* Check duplex: HD_ENA set selects half duplex */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				cfg_reg |= MAC_CMDCFG_HD_ENA;
			else
				cfg_reg &= ~MAC_CMDCFG_HD_ENA;

			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
				   dev->name, phydev->duplex);

			priv->oldduplex = phydev->duplex;
		}

		/* Check speed: ETH_SPEED selects gigabit, ENA_10
		 * selects 10 Mbps; both clear means 100 Mbps.
		 */
		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 100:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 10:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg |= MAC_CMDCFG_ENA_10;
				break;
			default:
				if (netif_msg_link(priv))
					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
						    phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}
		/* write back the (possibly unchanged) config */
		iowrite32(cfg_reg, &priv->mac_dev->command_config);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* link went down: reset the cached state so the next
		 * link-up reprograms speed and duplex
		 */
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock(&priv->mac_cfg_lock);
}
/* Attach to a PHY on the locally created MDIO bus.  With a fixed
 * priv->phy_addr the PHY is addressed directly; with POLL_PHY the bus
 * is scanned and the first PHY found is used.  Returns the connected
 * phy_device, or NULL/ERR_PTR-valued pointer on failure (callers
 * treat any falsy value as failure).
 */
static struct phy_device *connect_local_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	int ret;

	if (priv->phy_addr != POLL_PHY) {
		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 priv->mdio->id, priv->phy_addr);

		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
				     priv->phy_iface);
		/* NOTE(review): on failure phydev is an ERR_PTR, not
		 * NULL; init_phy() only checks !phydev - confirm.
		 */
		if (IS_ERR(phydev))
			netdev_err(dev, "Could not attach to PHY\n");

	} else {
		phydev = phy_find_first(priv->mdio);
		if (phydev == NULL) {
			netdev_err(dev, "No PHY found\n");
			return phydev;
		}

		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
					 priv->phy_iface);
		if (ret != 0) {
			netdev_err(dev, "Could not attach to PHY\n");
			phydev = NULL;
		}
	}
	return phydev;
}
729
/* Initialize driver's PHY state, and attach to the PHY.
 * Prefers an OF "phy-handle" phandle; otherwise falls back to the
 * local MDIO bus via connect_local_phy().  Returns 0 on success or
 * -ENODEV when no usable PHY can be found.
 */
static int init_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev;
	struct device_node *phynode;

	/* cached link state consumed by altera_tse_adjust_link() */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);

	if (!phynode) {
		netdev_dbg(dev, "no phy-handle found\n");
		if (!priv->mdio) {
			netdev_err(dev,
				   "No phy-handle nor local mdio specified\n");
			return -ENODEV;
		}
		phydev = connect_local_phy(dev);
	} else {
		netdev_dbg(dev, "phy-handle found\n");
		phydev = of_phy_connect(dev, phynode,
			&altera_tse_adjust_link, 0, priv->phy_iface);
	}

	if (!phydev) {
		netdev_err(dev, "Could not find the PHY\n");
		return -ENODEV;
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII
	 * Note: Checkpatch throws CHECKs for the camel case defines below,
	 * it's ok to ignore.
	 * NOTE(review): SUPPORTED_* constants are used to mask the
	 * advertising field; the ADVERTISED_* equivalents share the
	 * same bit values, so this works - confirm against ethtool.h.
	 */
	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/* Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
		   phydev->addr, phydev->phy_id, phydev->link);

	priv->phydev = phydev;
	return 0;
}
790
791static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
792{
793 struct altera_tse_mac *mac = priv->mac_dev;
794 u32 msb;
795 u32 lsb;
796
797 msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
799
800 /* Set primary MAC address */
801 iowrite32(msb, &mac->mac_addr_0);
802 iowrite32(lsb, &mac->mac_addr_1);
803}
804
/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 *
 * Busy-waits (up to ALTERA_TSE_SW_RESET_WATCHDOG_CNTR microseconds)
 * for the hardware to clear the SW_RESET bit.  Returns 0 on success,
 * -1 on timeout (callers only log the failure; not an errno value).
 */
static int reset_mac(struct altera_tse_private *priv)
{
	void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
	int counter;
	u32 dat;

	/* disable TX/RX and request reset + statistics-counter reset */
	dat = ioread32(cmd_cfg_reg);
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	iowrite32(dat, cmd_cfg_reg);

	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	/* timed out: manually clear the reset request bit */
	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		dat = ioread32(cmd_cfg_reg);
		dat &= ~MAC_CMDCFG_SW_RESET;
		iowrite32(dat, cmd_cfg_reg);
		return -1;
	}
	return 0;
}
837
/* Initialize MAC core registers: FIFO thresholds, station address,
 * frame length, and command_config options.  TX/RX remain disabled;
 * tse_set_mac() turns them on later.  Always returns 0.
 */
static int init_mac(struct altera_tse_private *priv)
{
	struct altera_tse_mac *mac = priv->mac_dev;
	unsigned int cmd = 0;
	u32 frm_length;

	/* Setup Rx FIFO */
	iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
		  &mac->rx_section_empty);
	iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
	iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
	iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);

	/* Setup Tx FIFO */
	iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
		  &mac->tx_section_empty);
	iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
	iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
	iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);

	/* MAC Address Configuration */
	tse_update_mac_addr(priv, priv->dev->dev_addr);

	/* MAC Function Configuration: max frame = header + MTU + FCS */
	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
	iowrite32(frm_length, &mac->frm_length);
	iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);

	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
	 * start address
	 */
	tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
	tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

	/* Set the MAC options */
	cmd = ioread32(&mac->command_config);
	cmd |= MAC_CMDCFG_PAD_EN;	/* Padding Removal on Receive */
	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
					 * with CRC errors
					 */
	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
	/* keep TX/RX disabled until tse_set_mac(priv, true) */
	cmd &= ~MAC_CMDCFG_TX_ENA;
	cmd &= ~MAC_CMDCFG_RX_ENA;
	iowrite32(cmd, &mac->command_config);

	if (netif_msg_hw(priv))
		dev_dbg(priv->device,
			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

	return 0;
}
893
894/* Start/stop MAC transmission logic
895 */
896static void tse_set_mac(struct altera_tse_private *priv, bool enable)
897{
898 struct altera_tse_mac *mac = priv->mac_dev;
899 u32 value = ioread32(&mac->command_config);
900
901 if (enable)
902 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
903 else
904 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
905
906 iowrite32(value, &mac->command_config);
907}
908
909/* Change the MTU
910 */
911static int tse_change_mtu(struct net_device *dev, int new_mtu)
912{
913 struct altera_tse_private *priv = netdev_priv(dev);
914 unsigned int max_mtu = priv->max_mtu;
915 unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
916
917 if (netif_running(dev)) {
918 netdev_err(dev, "must be stopped to change its MTU\n");
919 return -EBUSY;
920 }
921
922 if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
923 netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
924 return -EINVAL;
925 }
926
927 dev->mtu = new_mtu;
928 netdev_update_features(dev);
929
930 return 0;
931}
932
/* Program the 64-entry multicast hash table from the device's
 * current multicast list.  Each address hashes to a 6-bit value built
 * from the XOR-parity of each of its six octets (MSB octet first);
 * the corresponding table entry is set to 1.
 */
static void altera_tse_set_mcfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct altera_tse_mac *mac = priv->mac_dev;
	int i;
	struct netdev_hw_addr *ha;

	/* clear the hash filter */
	for (i = 0; i < 64; i++)
		iowrite32(0, &(mac->hash_table[i]));

	netdev_for_each_mc_addr(ha, dev) {
		unsigned int hash = 0;
		int mac_octet;

		/* one parity bit per octet, octet 5 down to octet 0 */
		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
			unsigned char xor_bit = 0;
			unsigned char octet = ha->addr[mac_octet];
			unsigned int bitshift;

			for (bitshift = 0; bitshift < 8; bitshift++)
				xor_bit ^= ((octet >> bitshift) & 0x01);

			hash = (hash << 1) | xor_bit;
		}
		iowrite32(1, &(mac->hash_table[hash]));
	}
}
961
962
963static void altera_tse_set_mcfilterall(struct net_device *dev)
964{
965 struct altera_tse_private *priv = netdev_priv(dev);
966 struct altera_tse_mac *mac = priv->mac_dev;
967 int i;
968
969 /* set the hash filter */
970 for (i = 0; i < 64; i++)
971 iowrite32(1, &(mac->hash_table[i]));
972}
973
974/* Set or clear the multicast filter for this adaptor
975 */
976static void tse_set_rx_mode_hashfilter(struct net_device *dev)
977{
978 struct altera_tse_private *priv = netdev_priv(dev);
979 struct altera_tse_mac *mac = priv->mac_dev;
980
981 spin_lock(&priv->mac_cfg_lock);
982
983 if (dev->flags & IFF_PROMISC)
984 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
985
986 if (dev->flags & IFF_ALLMULTI)
987 altera_tse_set_mcfilterall(dev);
988 else
989 altera_tse_set_mcfilter(dev);
990
991 spin_unlock(&priv->mac_cfg_lock);
992}
993
/* Set or clear the multicast filter for this adaptor.
 * Variant used when no hardware hash filter is present: any need for
 * multicast/unicast filtering simply drops the MAC into promiscuous
 * mode, leaving filtering to the stack.
 */
static void tse_set_rx_mode(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct altera_tse_mac *mac = priv->mac_dev;

	spin_lock(&priv->mac_cfg_lock);

	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
	else
		tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);

	spin_unlock(&priv->mac_cfg_lock);
}
1011
/* Open and initialize the interface (.ndo_open).
 * Sequence: init DMA -> reset/init MAC -> allocate rings -> request
 * RX/TX IRQs -> enable DMA IRQs and prime the RX ring -> enable MAC ->
 * start PHY, NAPI and the TX queue.  Unwinds in reverse via the goto
 * ladder on failure.
 */
static int tse_open(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret = 0;
	int i;
	unsigned long int flags;

	/* Reset and configure TSE MAC and probe associated PHY */
	ret = priv->dmaops->init_dma(priv);
	if (ret != 0) {
		netdev_err(dev, "Cannot initialize DMA\n");
		goto phy_error;
	}

	if (netif_msg_ifup(priv))
		netdev_warn(dev, "device MAC address %pM\n",
			    dev->dev_addr);

	/* warn on unexpected core revisions but continue anyway */
	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
		netdev_warn(dev, "TSE revision %x\n", priv->revision);

	spin_lock(&priv->mac_cfg_lock);
	/* reset failure is logged but not fatal */
	ret = reset_mac(priv);
	if (ret)
		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);

	ret = init_mac(priv);
	spin_unlock(&priv->mac_cfg_lock);
	if (ret) {
		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
		goto alloc_skbuf_error;
	}

	priv->dmaops->reset_dma(priv);

	/* Create and initialize the TX/RX descriptors chains. */
	priv->rx_ring_size = dma_rx_num;
	priv->tx_ring_size = dma_tx_num;
	ret = alloc_init_skbufs(priv);
	if (ret) {
		netdev_err(dev, "DMA descriptors initialization failed\n");
		goto alloc_skbuf_error;
	}


	/* Register RX interrupt */
	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register RX interrupt %d\n",
			   priv->rx_irq);
		goto init_error;
	}

	/* Register TX interrupt */
	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register TX interrupt %d\n",
			   priv->tx_irq);
		goto tx_request_irq_error;
	}

	/* Enable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->enable_rxirq(priv);
	priv->dmaops->enable_txirq(priv);

	/* Setup RX descriptor chain: hand every RX buffer to the DMA */
	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Start MAC Rx/Tx */
	spin_lock(&priv->mac_cfg_lock);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);

	if (priv->phydev)
		phy_start(priv->phydev);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

tx_request_irq_error:
	free_irq(priv->rx_irq, dev);
init_error:
	free_skbufs(dev);
alloc_skbuf_error:
	if (priv->phydev) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}
phy_error:
	return ret;
}
1113
/* Stop TSE MAC interface and put the device in an inactive state
 * (.ndo_stop).  Reverse of tse_open(): stop the PHY, queue and NAPI,
 * mask and release IRQs, then reset MAC and DMA and free the rings
 * under both config and tx locks.  Always returns 0.
 */
static int tse_shutdown(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	unsigned long int flags;

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Disable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->disable_rxirq(priv);
	priv->dmaops->disable_txirq(priv);
	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Free the IRQ lines */
	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);

	/* disable and reset the MAC, empties fifo */
	spin_lock(&priv->mac_cfg_lock);
	spin_lock(&priv->tx_lock);

	ret = reset_mac(priv);
	/* reset failure is logged but shutdown continues regardless */
	if (ret)
		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
	priv->dmaops->reset_dma(priv);
	free_skbufs(dev);

	spin_unlock(&priv->tx_lock);
	spin_unlock(&priv->mac_cfg_lock);

	priv->dmaops->uninit_dma(priv);

	return 0;
}
1159
/* Netdev callbacks for the TSE MAC.
 *
 * NOTE: deliberately not const — altera_tse_probe() patches
 * .ndo_set_rx_mode at probe time (tse_set_rx_mode_hashfilter is
 * substituted when the hardware hash filter is present).
 */
static struct net_device_ops altera_tse_netdev_ops = {
	.ndo_open		= tse_open,
	.ndo_stop		= tse_shutdown,
	.ndo_start_xmit		= tse_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= tse_set_rx_mode,
	.ndo_change_mtu		= tse_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1169
1170
1171static int request_and_map(struct platform_device *pdev, const char *name,
1172 struct resource **res, void __iomem **ptr)
1173{
1174 struct resource *region;
1175 struct device *device = &pdev->dev;
1176
1177 *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1178 if (*res == NULL) {
1179 dev_err(device, "resource %s not defined\n", name);
1180 return -ENODEV;
1181 }
1182
1183 region = devm_request_mem_region(device, (*res)->start,
1184 resource_size(*res), dev_name(device));
1185 if (region == NULL) {
1186 dev_err(device, "unable to request %s\n", name);
1187 return -EBUSY;
1188 }
1189
1190 *ptr = devm_ioremap_nocache(device, region->start,
1191 resource_size(region));
1192 if (*ptr == NULL) {
1193 dev_err(device, "ioremap_nocache of %s failed!", name);
1194 return -ENOMEM;
1195 }
1196
1197 return 0;
1198}
1199
1200/* Probe Altera TSE MAC device
1201 */
1202static int altera_tse_probe(struct platform_device *pdev)
1203{
1204 struct net_device *ndev;
1205 int ret = -ENODEV;
1206 struct resource *control_port;
1207 struct resource *dma_res;
1208 struct altera_tse_private *priv;
1209 const unsigned char *macaddr;
1210 struct device_node *np = pdev->dev.of_node;
1211 void __iomem *descmap;
1212 const struct of_device_id *of_id = NULL;
1213
1214 ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1215 if (!ndev) {
1216 dev_err(&pdev->dev, "Could not allocate network device\n");
1217 return -ENODEV;
1218 }
1219
1220 SET_NETDEV_DEV(ndev, &pdev->dev);
1221
1222 priv = netdev_priv(ndev);
1223 priv->device = &pdev->dev;
1224 priv->dev = ndev;
1225 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1226
1227 of_id = of_match_device(altera_tse_ids, &pdev->dev);
1228
1229 if (of_id)
1230 priv->dmaops = (struct altera_dmaops *)of_id->data;
1231
1232
1233 if (priv->dmaops &&
1234 priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1235 /* Get the mapped address to the SGDMA descriptor memory */
1236 ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1237 if (ret)
1238 goto out_free;
1239
1240 /* Start of that memory is for transmit descriptors */
1241 priv->tx_dma_desc = descmap;
1242
1243 /* First half is for tx descriptors, other half for tx */
1244 priv->txdescmem = resource_size(dma_res)/2;
1245
1246 priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1247
1248 priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1249 priv->txdescmem));
1250 priv->rxdescmem = resource_size(dma_res)/2;
1251 priv->rxdescmem_busaddr = dma_res->start;
1252 priv->rxdescmem_busaddr += priv->txdescmem;
1253
1254 if (upper_32_bits(priv->rxdescmem_busaddr)) {
1255 dev_dbg(priv->device,
1256 "SGDMA bus addresses greater than 32-bits\n");
1257 goto out_free;
1258 }
1259 if (upper_32_bits(priv->txdescmem_busaddr)) {
1260 dev_dbg(priv->device,
1261 "SGDMA bus addresses greater than 32-bits\n");
1262 goto out_free;
1263 }
1264 } else if (priv->dmaops &&
1265 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1266 ret = request_and_map(pdev, "rx_resp", &dma_res,
1267 &priv->rx_dma_resp);
1268 if (ret)
1269 goto out_free;
1270
1271 ret = request_and_map(pdev, "tx_desc", &dma_res,
1272 &priv->tx_dma_desc);
1273 if (ret)
1274 goto out_free;
1275
1276 priv->txdescmem = resource_size(dma_res);
1277 priv->txdescmem_busaddr = dma_res->start;
1278
1279 ret = request_and_map(pdev, "rx_desc", &dma_res,
1280 &priv->rx_dma_desc);
1281 if (ret)
1282 goto out_free;
1283
1284 priv->rxdescmem = resource_size(dma_res);
1285 priv->rxdescmem_busaddr = dma_res->start;
1286
1287 } else {
1288 goto out_free;
1289 }
1290
1291 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1292 dma_set_coherent_mask(priv->device,
1293 DMA_BIT_MASK(priv->dmaops->dmamask));
1294 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1295 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1296 else
1297 goto out_free;
1298
1299 /* MAC address space */
1300 ret = request_and_map(pdev, "control_port", &control_port,
1301 (void __iomem **)&priv->mac_dev);
1302 if (ret)
1303 goto out_free;
1304
1305 /* xSGDMA Rx Dispatcher address space */
1306 ret = request_and_map(pdev, "rx_csr", &dma_res,
1307 &priv->rx_dma_csr);
1308 if (ret)
1309 goto out_free;
1310
1311
1312 /* xSGDMA Tx Dispatcher address space */
1313 ret = request_and_map(pdev, "tx_csr", &dma_res,
1314 &priv->tx_dma_csr);
1315 if (ret)
1316 goto out_free;
1317
1318
1319 /* Rx IRQ */
1320 priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1321 if (priv->rx_irq == -ENXIO) {
1322 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1323 ret = -ENXIO;
1324 goto out_free;
1325 }
1326
1327 /* Tx IRQ */
1328 priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1329 if (priv->tx_irq == -ENXIO) {
1330 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1331 ret = -ENXIO;
1332 goto out_free;
1333 }
1334
1335 /* get FIFO depths from device tree */
1336 if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1337 &priv->rx_fifo_depth)) {
1338 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1339 ret = -ENXIO;
1340 goto out_free;
1341 }
1342
1343 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1344 &priv->rx_fifo_depth)) {
1345 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1346 ret = -ENXIO;
1347 goto out_free;
1348 }
1349
1350 /* get hash filter settings for this instance */
1351 priv->hash_filter =
1352 of_property_read_bool(pdev->dev.of_node,
1353 "altr,has-hash-multicast-filter");
1354
1355 /* get supplemental address settings for this instance */
1356 priv->added_unicast =
1357 of_property_read_bool(pdev->dev.of_node,
1358 "altr,has-supplementary-unicast");
1359
1360 /* Max MTU is 1500, ETH_DATA_LEN */
1361 priv->max_mtu = ETH_DATA_LEN;
1362
1363 /* Get the max mtu from the device tree. Note that the
1364 * "max-frame-size" parameter is actually max mtu. Definition
1365 * in the ePAPR v1.1 spec and usage differ, so go with usage.
1366 */
1367 of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1368 &priv->max_mtu);
1369
1370 /* The DMA buffer size already accounts for an alignment bias
1371 * to avoid unaligned access exceptions for the NIOS processor,
1372 */
1373 priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1374
1375 /* get default MAC address from device tree */
1376 macaddr = of_get_mac_address(pdev->dev.of_node);
1377 if (macaddr)
1378 ether_addr_copy(ndev->dev_addr, macaddr);
1379 else
1380 eth_hw_addr_random(ndev);
1381
1382 priv->phy_iface = of_get_phy_mode(np);
1383
1384 /* try to get PHY address from device tree, use PHY autodetection if
1385 * no valid address is given
1386 */
1387 if (of_property_read_u32(pdev->dev.of_node, "phy-addr",
1388 &priv->phy_addr)) {
1389 priv->phy_addr = POLL_PHY;
1390 }
1391
1392 if (!((priv->phy_addr == POLL_PHY) ||
1393 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
1394 dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
1395 priv->phy_addr);
1396 goto out_free;
1397 }
1398
1399 /* Create/attach to MDIO bus */
1400 ret = altera_tse_mdio_create(ndev,
1401 atomic_add_return(1, &instance_count));
1402
1403 if (ret)
1404 goto out_free;
1405
1406 /* initialize netdev */
1407 ether_setup(ndev);
1408 ndev->mem_start = control_port->start;
1409 ndev->mem_end = control_port->end;
1410 ndev->netdev_ops = &altera_tse_netdev_ops;
1411 altera_tse_set_ethtool_ops(ndev);
1412
1413 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1414
1415 if (priv->hash_filter)
1416 altera_tse_netdev_ops.ndo_set_rx_mode =
1417 tse_set_rx_mode_hashfilter;
1418
1419 /* Scatter/gather IO is not supported,
1420 * so it is turned off
1421 */
1422 ndev->hw_features &= ~NETIF_F_SG;
1423 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1424
1425 /* VLAN offloading of tagging, stripping and filtering is not
1426 * supported by hardware, but driver will accommodate the
1427 * extra 4-byte VLAN tag for processing by upper layers
1428 */
1429 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1430
1431 /* setup NAPI interface */
1432 netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1433
1434 spin_lock_init(&priv->mac_cfg_lock);
1435 spin_lock_init(&priv->tx_lock);
1436 spin_lock_init(&priv->rxdma_irq_lock);
1437
1438 ret = register_netdev(ndev);
1439 if (ret) {
1440 dev_err(&pdev->dev, "failed to register TSE net device\n");
1441 goto out_free_mdio;
1442 }
1443
1444 platform_set_drvdata(pdev, ndev);
1445
1446 priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1447
1448 if (netif_msg_probe(priv))
1449 dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1450 (priv->revision >> 8) & 0xff,
1451 priv->revision & 0xff,
1452 (unsigned long) control_port->start, priv->rx_irq,
1453 priv->tx_irq);
1454
1455 ret = init_phy(ndev);
1456 if (ret != 0) {
1457 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1458 goto out_free_mdio;
1459 }
1460 return 0;
1461
1462out_free_mdio:
1463 altera_tse_mdio_destroy(ndev);
1464out_free:
1465 free_netdev(ndev);
1466 return ret;
1467}
1468
/* Remove Altera TSE MAC device
 *
 * Platform-driver .remove callback: tears down the MDIO bus, unregisters
 * the net device (which invokes tse_shutdown() if the interface is up)
 * and releases the netdev memory. Always returns 0.
 */
static int altera_tse_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);
	altera_tse_mdio_destroy(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}
1482
/* DMA backend ops for the (older) Altera SGDMA engine.
 * Selected via the "altr,tse-1.0" compatibles; descriptors live in a
 * dedicated 32-bit-addressable on-chip memory, hence dmamask = 32.
 */
struct altera_dmaops altera_dtype_sgdma = {
	.altera_dtype = ALTERA_DTYPE_SGDMA,
	.dmamask = 32,
	.reset_dma = sgdma_reset,
	.enable_txirq = sgdma_enable_txirq,
	.enable_rxirq = sgdma_enable_rxirq,
	.disable_txirq = sgdma_disable_txirq,
	.disable_rxirq = sgdma_disable_rxirq,
	.clear_txirq = sgdma_clear_txirq,
	.clear_rxirq = sgdma_clear_rxirq,
	.tx_buffer = sgdma_tx_buffer,
	.tx_completions = sgdma_tx_completions,
	.add_rx_desc = sgdma_add_rx_desc,
	.get_rx_status = sgdma_rx_status,
	.init_dma = sgdma_initialize,
	.uninit_dma = sgdma_uninitialize,
};
1500
/* DMA backend ops for the modular scatter-gather DMA (MSGDMA) engine.
 * Selected via the "altr,tse-msgdma-1.0" compatible; supports full
 * 64-bit bus addressing (dmamask = 64).
 */
struct altera_dmaops altera_dtype_msgdma = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.dmamask = 64,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
};
1518
/* Device-tree match table; .data selects the DMA backend ops used by
 * altera_tse_probe(). The uppercase "ALTR,tse-1.0" entry keeps older
 * device trees working.
 */
static struct of_device_id altera_tse_ids[] = {
	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
	{},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);
1526
/* Platform driver glue; power management is not implemented, so
 * suspend/resume are explicitly NULL.
 */
static struct platform_driver altera_tse_driver = {
	.probe		= altera_tse_probe,
	.remove		= altera_tse_remove,
	.suspend	= NULL,
	.resume		= NULL,
	.driver		= {
		.name	= ALTERA_TSE_RESOURCE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = altera_tse_ids,
	},
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
new file mode 100644
index 000000000000..70fa13f486b2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -0,0 +1,44 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include "altera_tse.h"
18#include "altera_utils.h"
19
20void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
21{
22 u32 value = ioread32(ioaddr);
23 value |= bit_mask;
24 iowrite32(value, ioaddr);
25}
26
27void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
28{
29 u32 value = ioread32(ioaddr);
30 value &= ~bit_mask;
31 iowrite32(value, ioaddr);
32}
33
34int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
35{
36 u32 value = ioread32(ioaddr);
37 return (value & bit_mask) ? 1 : 0;
38}
39
40int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
41{
42 u32 value = ioread32(ioaddr);
43 return (value & bit_mask) ? 0 : 1;
44}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
new file mode 100644
index 000000000000..ce1db36d3583
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -0,0 +1,27 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/kernel.h>
18
19#ifndef __ALTERA_UTILS_H__
20#define __ALTERA_UTILS_H__
21
22void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
23void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
24int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
25int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
26
27#endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 18e542f7853d..98a10d555b79 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
578 outs++; 578 outs++;
579 /* Kick the lance: transmit now */ 579 /* Kick the lance: transmit now */
580 WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); 580 WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
581 dev_kfree_skb(skb); 581 dev_consume_skb_any(skb);
582 582
583 spin_lock_irqsave(&lp->devlock, flags); 583 spin_lock_irqsave(&lp->devlock, flags);
584 if (TX_BUFFS_AVAIL) 584 if (TX_BUFFS_AVAIL)
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 9793767996a2..87e727b921dc 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
472 if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN) 472 if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
473 netif_stop_queue(dev); 473 netif_stop_queue(dev);
474 474
475 dev_kfree_skb(skb); 475 dev_consume_skb_any(skb);
476 476
477 return NETDEV_TX_OK; 477 return NETDEV_TX_OK;
478} 478}
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 2061b471fd16..26efaaa5e73f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
720 int rx_pkt_limit = budget; 720 int rx_pkt_limit = budget;
721 unsigned long flags; 721 unsigned long flags;
722 722
723 if (rx_pkt_limit <= 0)
724 goto rx_not_empty;
725
723 do{ 726 do{
724 /* process receive packets until we use the quota*/ 727 /* process receive packets until we use the quota*/
725 /* If we own the next entry, it's a new packet. Send it up. */ 728 /* If we own the next entry, it's a new packet. Send it up. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339cccfe05a..e7cc9174e364 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
549 struct pcnet32_rx_head *new_rx_ring; 549 struct pcnet32_rx_head *new_rx_ring;
550 struct sk_buff **new_skb_list; 550 struct sk_buff **new_skb_list;
551 int new, overlap; 551 int new, overlap;
552 unsigned int entries = 1 << size;
552 553
553 new_rx_ring = pci_alloc_consistent(lp->pci_dev, 554 new_rx_ring = pci_alloc_consistent(lp->pci_dev,
554 sizeof(struct pcnet32_rx_head) * 555 sizeof(struct pcnet32_rx_head) *
555 (1 << size), 556 entries,
556 &new_ring_dma_addr); 557 &new_ring_dma_addr);
557 if (new_rx_ring == NULL) { 558 if (new_rx_ring == NULL) {
558 netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); 559 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
559 return; 560 return;
560 } 561 }
561 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); 562 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
562 563
563 new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC); 564 new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
564 if (!new_dma_addr_list) 565 if (!new_dma_addr_list)
565 goto free_new_rx_ring; 566 goto free_new_rx_ring;
566 567
567 new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *), 568 new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
568 GFP_ATOMIC);
569 if (!new_skb_list) 569 if (!new_skb_list)
570 goto free_new_lists; 570 goto free_new_lists;
571 571
572 /* first copy the current receive buffers */ 572 /* first copy the current receive buffers */
573 overlap = min(size, lp->rx_ring_size); 573 overlap = min(entries, lp->rx_ring_size);
574 for (new = 0; new < overlap; new++) { 574 for (new = 0; new < overlap; new++) {
575 new_rx_ring[new] = lp->rx_ring[new]; 575 new_rx_ring[new] = lp->rx_ring[new];
576 new_dma_addr_list[new] = lp->rx_dma_addr[new]; 576 new_dma_addr_list[new] = lp->rx_dma_addr[new];
577 new_skb_list[new] = lp->rx_skbuff[new]; 577 new_skb_list[new] = lp->rx_skbuff[new];
578 } 578 }
579 /* now allocate any new buffers needed */ 579 /* now allocate any new buffers needed */
580 for (; new < size; new++) { 580 for (; new < entries; new++) {
581 struct sk_buff *rx_skbuff; 581 struct sk_buff *rx_skbuff;
582 new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB); 582 new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
583 rx_skbuff = new_skb_list[new]; 583 rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
592 new_dma_addr_list[new] = 592 new_dma_addr_list[new] =
593 pci_map_single(lp->pci_dev, rx_skbuff->data, 593 pci_map_single(lp->pci_dev, rx_skbuff->data,
594 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 594 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
595 if (pci_dma_mapping_error(lp->pci_dev,
596 new_dma_addr_list[new])) {
597 netif_err(lp, drv, dev, "%s dma mapping failed\n",
598 __func__);
599 dev_kfree_skb(new_skb_list[new]);
600 goto free_all_new;
601 }
595 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); 602 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
596 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); 603 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
597 new_rx_ring[new].status = cpu_to_le16(0x8000); 604 new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
599 /* and free any unneeded buffers */ 606 /* and free any unneeded buffers */
600 for (; new < lp->rx_ring_size; new++) { 607 for (; new < lp->rx_ring_size; new++) {
601 if (lp->rx_skbuff[new]) { 608 if (lp->rx_skbuff[new]) {
602 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], 609 if (!pci_dma_mapping_error(lp->pci_dev,
603 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 610 lp->rx_dma_addr[new]))
611 pci_unmap_single(lp->pci_dev,
612 lp->rx_dma_addr[new],
613 PKT_BUF_SIZE,
614 PCI_DMA_FROMDEVICE);
604 dev_kfree_skb(lp->rx_skbuff[new]); 615 dev_kfree_skb(lp->rx_skbuff[new]);
605 } 616 }
606 } 617 }
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
612 lp->rx_ring_size, lp->rx_ring, 623 lp->rx_ring_size, lp->rx_ring,
613 lp->rx_ring_dma_addr); 624 lp->rx_ring_dma_addr);
614 625
615 lp->rx_ring_size = (1 << size); 626 lp->rx_ring_size = entries;
616 lp->rx_mod_mask = lp->rx_ring_size - 1; 627 lp->rx_mod_mask = lp->rx_ring_size - 1;
617 lp->rx_len_bits = (size << 4); 628 lp->rx_len_bits = (size << 4);
618 lp->rx_ring = new_rx_ring; 629 lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
624free_all_new: 635free_all_new:
625 while (--new >= lp->rx_ring_size) { 636 while (--new >= lp->rx_ring_size) {
626 if (new_skb_list[new]) { 637 if (new_skb_list[new]) {
627 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], 638 if (!pci_dma_mapping_error(lp->pci_dev,
628 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 639 new_dma_addr_list[new]))
640 pci_unmap_single(lp->pci_dev,
641 new_dma_addr_list[new],
642 PKT_BUF_SIZE,
643 PCI_DMA_FROMDEVICE);
629 dev_kfree_skb(new_skb_list[new]); 644 dev_kfree_skb(new_skb_list[new]);
630 } 645 }
631 } 646 }
@@ -634,8 +649,7 @@ free_new_lists:
634 kfree(new_dma_addr_list); 649 kfree(new_dma_addr_list);
635free_new_rx_ring: 650free_new_rx_ring:
636 pci_free_consistent(lp->pci_dev, 651 pci_free_consistent(lp->pci_dev,
637 sizeof(struct pcnet32_rx_head) * 652 sizeof(struct pcnet32_rx_head) * entries,
638 (1 << size),
639 new_rx_ring, 653 new_rx_ring,
640 new_ring_dma_addr); 654 new_ring_dma_addr);
641} 655}
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
650 lp->rx_ring[i].status = 0; /* CPU owns buffer */ 664 lp->rx_ring[i].status = 0; /* CPU owns buffer */
651 wmb(); /* Make sure adapter sees owner change */ 665 wmb(); /* Make sure adapter sees owner change */
652 if (lp->rx_skbuff[i]) { 666 if (lp->rx_skbuff[i]) {
653 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], 667 if (!pci_dma_mapping_error(lp->pci_dev,
654 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 668 lp->rx_dma_addr[i]))
669 pci_unmap_single(lp->pci_dev,
670 lp->rx_dma_addr[i],
671 PKT_BUF_SIZE,
672 PCI_DMA_FROMDEVICE);
655 dev_kfree_skb_any(lp->rx_skbuff[i]); 673 dev_kfree_skb_any(lp->rx_skbuff[i]);
656 } 674 }
657 lp->rx_skbuff[i] = NULL; 675 lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
930 lp->tx_dma_addr[x] = 948 lp->tx_dma_addr[x] =
931 pci_map_single(lp->pci_dev, skb->data, skb->len, 949 pci_map_single(lp->pci_dev, skb->data, skb->len,
932 PCI_DMA_TODEVICE); 950 PCI_DMA_TODEVICE);
951 if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
952 netif_printk(lp, hw, KERN_DEBUG, dev,
953 "DMA mapping error at line: %d!\n",
954 __LINE__);
955 goto clean_up;
956 }
933 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); 957 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
934 wmb(); /* Make sure owner changes after all others are visible */ 958 wmb(); /* Make sure owner changes after all others are visible */
935 lp->tx_ring[x].status = cpu_to_le16(status); 959 lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
1142 1166
1143 if (pkt_len > rx_copybreak) { 1167 if (pkt_len > rx_copybreak) {
1144 struct sk_buff *newskb; 1168 struct sk_buff *newskb;
1169 dma_addr_t new_dma_addr;
1145 1170
1146 newskb = netdev_alloc_skb(dev, PKT_BUF_SKB); 1171 newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
1172 /*
1173 * map the new buffer, if mapping fails, drop the packet and
1174 * reuse the old buffer
1175 */
1147 if (newskb) { 1176 if (newskb) {
1148 skb_reserve(newskb, NET_IP_ALIGN); 1177 skb_reserve(newskb, NET_IP_ALIGN);
1149 skb = lp->rx_skbuff[entry]; 1178 new_dma_addr = pci_map_single(lp->pci_dev,
1150 pci_unmap_single(lp->pci_dev, 1179 newskb->data,
1151 lp->rx_dma_addr[entry], 1180 PKT_BUF_SIZE,
1152 PKT_BUF_SIZE, 1181 PCI_DMA_FROMDEVICE);
1153 PCI_DMA_FROMDEVICE); 1182 if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
1154 skb_put(skb, pkt_len); 1183 netif_err(lp, rx_err, dev,
1155 lp->rx_skbuff[entry] = newskb; 1184 "DMA mapping error.\n");
1156 lp->rx_dma_addr[entry] = 1185 dev_kfree_skb(newskb);
1157 pci_map_single(lp->pci_dev, 1186 skb = NULL;
1158 newskb->data, 1187 } else {
1159 PKT_BUF_SIZE, 1188 skb = lp->rx_skbuff[entry];
1160 PCI_DMA_FROMDEVICE); 1189 pci_unmap_single(lp->pci_dev,
1161 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); 1190 lp->rx_dma_addr[entry],
1162 rx_in_place = 1; 1191 PKT_BUF_SIZE,
1192 PCI_DMA_FROMDEVICE);
1193 skb_put(skb, pkt_len);
1194 lp->rx_skbuff[entry] = newskb;
1195 lp->rx_dma_addr[entry] = new_dma_addr;
1196 rxp->base = cpu_to_le32(new_dma_addr);
1197 rx_in_place = 1;
1198 }
1163 } else 1199 } else
1164 skb = NULL; 1200 skb = NULL;
1165 } else 1201 } else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
2229 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 2265 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2230 wmb(); /* Make sure adapter sees owner change */ 2266 wmb(); /* Make sure adapter sees owner change */
2231 if (lp->tx_skbuff[i]) { 2267 if (lp->tx_skbuff[i]) {
2232 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], 2268 if (!pci_dma_mapping_error(lp->pci_dev,
2233 lp->tx_skbuff[i]->len, 2269 lp->tx_dma_addr[i]))
2234 PCI_DMA_TODEVICE); 2270 pci_unmap_single(lp->pci_dev,
2271 lp->tx_dma_addr[i],
2272 lp->tx_skbuff[i]->len,
2273 PCI_DMA_TODEVICE);
2235 dev_kfree_skb_any(lp->tx_skbuff[i]); 2274 dev_kfree_skb_any(lp->tx_skbuff[i]);
2236 } 2275 }
2237 lp->tx_skbuff[i] = NULL; 2276 lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
2264 } 2303 }
2265 2304
2266 rmb(); 2305 rmb();
2267 if (lp->rx_dma_addr[i] == 0) 2306 if (lp->rx_dma_addr[i] == 0) {
2268 lp->rx_dma_addr[i] = 2307 lp->rx_dma_addr[i] =
2269 pci_map_single(lp->pci_dev, rx_skbuff->data, 2308 pci_map_single(lp->pci_dev, rx_skbuff->data,
2270 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 2309 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
2310 if (pci_dma_mapping_error(lp->pci_dev,
2311 lp->rx_dma_addr[i])) {
2312 /* there is not much we can do at this point */
2313 netif_err(lp, drv, dev,
2314 "%s pci dma mapping error\n",
2315 __func__);
2316 return -1;
2317 }
2318 }
2271 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); 2319 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
2272 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); 2320 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
2273 wmb(); /* Make sure owner changes after all others are visible */ 2321 wmb(); /* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
2397 2445
2398 lp->tx_ring[entry].misc = 0x00000000; 2446 lp->tx_ring[entry].misc = 0x00000000;
2399 2447
2400 lp->tx_skbuff[entry] = skb;
2401 lp->tx_dma_addr[entry] = 2448 lp->tx_dma_addr[entry] =
2402 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); 2449 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2450 if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
2451 dev_kfree_skb_any(skb);
2452 dev->stats.tx_dropped++;
2453 goto drop_packet;
2454 }
2455 lp->tx_skbuff[entry] = skb;
2403 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); 2456 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
2404 wmb(); /* Make sure owner changes after all others are visible */ 2457 wmb(); /* Make sure owner changes after all others are visible */
2405 lp->tx_ring[entry].status = cpu_to_le16(status); 2458 lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
2414 lp->tx_full = 1; 2467 lp->tx_full = 1;
2415 netif_stop_queue(dev); 2468 netif_stop_queue(dev);
2416 } 2469 }
2470drop_packet:
2417 spin_unlock_irqrestore(&lp->lock, flags); 2471 spin_unlock_irqrestore(&lp->lock, flags);
2418 return NETDEV_TX_OK; 2472 return NETDEV_TX_OK;
2419} 2473}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 380d24922049..17bb9ce96260 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -535,7 +535,7 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
535 if (!alx->descmem.virt) 535 if (!alx->descmem.virt)
536 goto out_free; 536 goto out_free;
537 537
538 alx->txq.tpd = (void *)alx->descmem.virt; 538 alx->txq.tpd = alx->descmem.virt;
539 alx->txq.tpd_dma = alx->descmem.dma; 539 alx->txq.tpd_dma = alx->descmem.dma;
540 540
541 /* alignment requirement for next block */ 541 /* alignment requirement for next block */
@@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
1097 return NETDEV_TX_OK; 1097 return NETDEV_TX_OK;
1098 1098
1099drop: 1099drop:
1100 dev_kfree_skb(skb); 1100 dev_kfree_skb_any(skb);
1101 return NETDEV_TX_OK; 1101 return NETDEV_TX_OK;
1102} 1102}
1103 1103
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4d3258dd0a88..e11bf18fbbd1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
832} 832}
833 833
834static inline void atl1c_clean_buffer(struct pci_dev *pdev, 834static inline void atl1c_clean_buffer(struct pci_dev *pdev,
835 struct atl1c_buffer *buffer_info, int in_irq) 835 struct atl1c_buffer *buffer_info)
836{ 836{
837 u16 pci_driection; 837 u16 pci_driection;
838 if (buffer_info->flags & ATL1C_BUFFER_FREE) 838 if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
850 pci_unmap_page(pdev, buffer_info->dma, 850 pci_unmap_page(pdev, buffer_info->dma,
851 buffer_info->length, pci_driection); 851 buffer_info->length, pci_driection);
852 } 852 }
853 if (buffer_info->skb) { 853 if (buffer_info->skb)
854 if (in_irq) 854 dev_consume_skb_any(buffer_info->skb);
855 dev_kfree_skb_irq(buffer_info->skb);
856 else
857 dev_kfree_skb(buffer_info->skb);
858 }
859 buffer_info->dma = 0; 855 buffer_info->dma = 0;
860 buffer_info->skb = NULL; 856 buffer_info->skb = NULL;
861 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); 857 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
875 ring_count = tpd_ring->count; 871 ring_count = tpd_ring->count;
876 for (index = 0; index < ring_count; index++) { 872 for (index = 0; index < ring_count; index++) {
877 buffer_info = &tpd_ring->buffer_info[index]; 873 buffer_info = &tpd_ring->buffer_info[index];
878 atl1c_clean_buffer(pdev, buffer_info, 0); 874 atl1c_clean_buffer(pdev, buffer_info);
879 } 875 }
880 876
881 /* Zero out Tx-buffers */ 877 /* Zero out Tx-buffers */
@@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
899 895
900 for (j = 0; j < rfd_ring->count; j++) { 896 for (j = 0; j < rfd_ring->count; j++) {
901 buffer_info = &rfd_ring->buffer_info[j]; 897 buffer_info = &rfd_ring->buffer_info[j];
902 atl1c_clean_buffer(pdev, buffer_info, 0); 898 atl1c_clean_buffer(pdev, buffer_info);
903 } 899 }
904 /* zero out the descriptor ring */ 900 /* zero out the descriptor ring */
905 memset(rfd_ring->desc, 0, rfd_ring->size); 901 memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1562 1558
1563 while (next_to_clean != hw_next_to_clean) { 1559 while (next_to_clean != hw_next_to_clean) {
1564 buffer_info = &tpd_ring->buffer_info[next_to_clean]; 1560 buffer_info = &tpd_ring->buffer_info[next_to_clean];
1565 atl1c_clean_buffer(pdev, buffer_info, 1); 1561 atl1c_clean_buffer(pdev, buffer_info);
1566 if (++next_to_clean == tpd_ring->count) 1562 if (++next_to_clean == tpd_ring->count)
1567 next_to_clean = 0; 1563 next_to_clean = 0;
1568 atomic_set(&tpd_ring->next_to_clean, next_to_clean); 1564 atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1977,17 +1973,17 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
1977 enum atl1c_trans_queue type) 1973 enum atl1c_trans_queue type)
1978{ 1974{
1979 struct pci_dev *pdev = adapter->pdev; 1975 struct pci_dev *pdev = adapter->pdev;
1976 unsigned short offload_type;
1980 u8 hdr_len; 1977 u8 hdr_len;
1981 u32 real_len; 1978 u32 real_len;
1982 unsigned short offload_type;
1983 int err;
1984 1979
1985 if (skb_is_gso(skb)) { 1980 if (skb_is_gso(skb)) {
1986 if (skb_header_cloned(skb)) { 1981 int err;
1987 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1982
1988 if (unlikely(err)) 1983 err = skb_cow_head(skb, 0);
1989 return -1; 1984 if (err < 0)
1990 } 1985 return err;
1986
1991 offload_type = skb_shinfo(skb)->gso_type; 1987 offload_type = skb_shinfo(skb)->gso_type;
1992 1988
1993 if (offload_type & SKB_GSO_TCPV4) { 1989 if (offload_type & SKB_GSO_TCPV4) {
@@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
2085 while (index != tpd_ring->next_to_use) { 2081 while (index != tpd_ring->next_to_use) {
2086 tpd = ATL1C_TPD_DESC(tpd_ring, index); 2082 tpd = ATL1C_TPD_DESC(tpd_ring, index);
2087 buffer_info = &tpd_ring->buffer_info[index]; 2083 buffer_info = &tpd_ring->buffer_info[index];
2088 atl1c_clean_buffer(adpt->pdev, buffer_info, 0); 2084 atl1c_clean_buffer(adpt->pdev, buffer_info);
2089 memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); 2085 memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
2090 if (++index == tpd_ring->count) 2086 if (++index == tpd_ring->count)
2091 index = 0; 2087 index = 0;
@@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2258 /* roll back tpd/buffer */ 2254 /* roll back tpd/buffer */
2259 atl1c_tx_rollback(adapter, tpd, type); 2255 atl1c_tx_rollback(adapter, tpd, type);
2260 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2256 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2261 dev_kfree_skb(skb); 2257 dev_kfree_skb_any(skb);
2262 } else { 2258 } else {
2263 atl1c_tx_queue(adapter, skb, tpd, type); 2259 atl1c_tx_queue(adapter, skb, tpd, type);
2264 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2260 spin_unlock_irqrestore(&adapter->tx_lock, flags);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 422aab27ea1b..4345332533ad 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1641,17 +1641,17 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
1641static int atl1e_tso_csum(struct atl1e_adapter *adapter, 1641static int atl1e_tso_csum(struct atl1e_adapter *adapter,
1642 struct sk_buff *skb, struct atl1e_tpd_desc *tpd) 1642 struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
1643{ 1643{
1644 unsigned short offload_type;
1644 u8 hdr_len; 1645 u8 hdr_len;
1645 u32 real_len; 1646 u32 real_len;
1646 unsigned short offload_type;
1647 int err;
1648 1647
1649 if (skb_is_gso(skb)) { 1648 if (skb_is_gso(skb)) {
1650 if (skb_header_cloned(skb)) { 1649 int err;
1651 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1650
1652 if (unlikely(err)) 1651 err = skb_cow_head(skb, 0);
1653 return -1; 1652 if (err < 0)
1654 } 1653 return err;
1654
1655 offload_type = skb_shinfo(skb)->gso_type; 1655 offload_type = skb_shinfo(skb)->gso_type;
1656 1656
1657 if (offload_type & SKB_GSO_TCPV4) { 1657 if (offload_type & SKB_GSO_TCPV4) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 287272dd69da..dfd0e91fa726 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2118,18 +2118,17 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
2118} 2118}
2119 2119
2120static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, 2120static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
2121 struct tx_packet_desc *ptpd) 2121 struct tx_packet_desc *ptpd)
2122{ 2122{
2123 u8 hdr_len, ip_off; 2123 u8 hdr_len, ip_off;
2124 u32 real_len; 2124 u32 real_len;
2125 int err;
2126 2125
2127 if (skb_shinfo(skb)->gso_size) { 2126 if (skb_shinfo(skb)->gso_size) {
2128 if (skb_header_cloned(skb)) { 2127 int err;
2129 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2128
2130 if (unlikely(err)) 2129 err = skb_cow_head(skb, 0);
2131 return -1; 2130 if (err < 0)
2132 } 2131 return err;
2133 2132
2134 if (skb->protocol == htons(ETH_P_IP)) { 2133 if (skb->protocol == htons(ETH_P_IP)) {
2135 struct iphdr *iph = ip_hdr(skb); 2134 struct iphdr *iph = ip_hdr(skb);
@@ -2175,7 +2174,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
2175 return 3; 2174 return 3;
2176 } 2175 }
2177 } 2176 }
2178 return false; 2177 return 0;
2179} 2178}
2180 2179
2181static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, 2180static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 265ce1b752ed..78befb522a52 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -55,6 +55,7 @@ static const char atl2_driver_name[] = "atl2";
55static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver"; 55static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
56static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation."; 56static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
57static const char atl2_driver_version[] = ATL2_DRV_VERSION; 57static const char atl2_driver_version[] = ATL2_DRV_VERSION;
58static const struct ethtool_ops atl2_ethtool_ops;
58 59
59MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>"); 60MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
60MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver"); 61MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -71,8 +72,6 @@ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
71}; 72};
72MODULE_DEVICE_TABLE(pci, atl2_pci_tbl); 73MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
73 74
74static void atl2_set_ethtool_ops(struct net_device *netdev);
75
76static void atl2_check_options(struct atl2_adapter *adapter); 75static void atl2_check_options(struct atl2_adapter *adapter);
77 76
78/** 77/**
@@ -1397,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1397 atl2_setup_pcicmd(pdev); 1396 atl2_setup_pcicmd(pdev);
1398 1397
1399 netdev->netdev_ops = &atl2_netdev_ops; 1398 netdev->netdev_ops = &atl2_netdev_ops;
1400 atl2_set_ethtool_ops(netdev); 1399 SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
1401 netdev->watchdog_timeo = 5 * HZ; 1400 netdev->watchdog_timeo = 5 * HZ;
1402 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1401 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1403 1402
@@ -2105,11 +2104,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
2105 .set_eeprom = atl2_set_eeprom, 2104 .set_eeprom = atl2_set_eeprom,
2106}; 2105};
2107 2106
2108static void atl2_set_ethtool_ops(struct net_device *netdev)
2109{
2110 SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
2111}
2112
2113#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ 2107#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
2114 (((a) & 0xff00ff00) >> 8)) 2108 (((a) & 0xff00ff00) >> 8))
2115#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) 2109#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3f97d9fd0a71..85dbddd03722 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -60,6 +60,17 @@ config BCM63XX_ENET
60 This driver supports the ethernet MACs in the Broadcom 63xx 60 This driver supports the ethernet MACs in the Broadcom 63xx
61 MIPS chipset family (BCM63XX). 61 MIPS chipset family (BCM63XX).
62 62
63config BCMGENET
64 tristate "Broadcom GENET internal MAC support"
65 depends on OF
66 select MII
67 select PHYLIB
68 select FIXED_PHY if BCMGENET=y
69 select BCM7XXX_PHY
70 help
71 This driver supports the built-in Ethernet MACs found in the
72 Broadcom BCM7xxx Set Top Box family chipset.
73
63config BNX2 74config BNX2
64 tristate "Broadcom NetXtremeII support" 75 tristate "Broadcom NetXtremeII support"
65 depends on PCI 76 depends on PCI
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 68efa1a3fb88..fd639a0d4c7d 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_B44) += b44.o 5obj-$(CONFIG_B44) += b44.o
6obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o 6obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
7obj-$(CONFIG_BCMGENET) += genet/
7obj-$(CONFIG_BNX2) += bnx2.o 8obj-$(CONFIG_BNX2) += bnx2.o
8obj-$(CONFIG_CNIC) += cnic.o 9obj-$(CONFIG_CNIC) += cnic.o
9obj-$(CONFIG_BNX2X) += bnx2x/ 10obj-$(CONFIG_BNX2X) += bnx2x/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 8a7bf7dad898..05ba62589017 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1685 unsigned int start; 1685 unsigned int start;
1686 1686
1687 do { 1687 do {
1688 start = u64_stats_fetch_begin_bh(&hwstat->syncp); 1688 start = u64_stats_fetch_begin_irq(&hwstat->syncp);
1689 1689
1690 /* Convert HW stats into rtnl_link_stats64 stats. */ 1690 /* Convert HW stats into rtnl_link_stats64 stats. */
1691 nstat->rx_packets = hwstat->rx_pkts; 1691 nstat->rx_packets = hwstat->rx_pkts;
@@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1719 /* Carrier lost counter seems to be broken for some devices */ 1719 /* Carrier lost counter seems to be broken for some devices */
1720 nstat->tx_carrier_errors = hwstat->tx_carrier_lost; 1720 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1721#endif 1721#endif
1722 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); 1722 } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
1723 1723
1724 return nstat; 1724 return nstat;
1725} 1725}
@@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
2073 do { 2073 do {
2074 data_src = &hwstat->tx_good_octets; 2074 data_src = &hwstat->tx_good_octets;
2075 data_dst = data; 2075 data_dst = data;
2076 start = u64_stats_fetch_begin_bh(&hwstat->syncp); 2076 start = u64_stats_fetch_begin_irq(&hwstat->syncp);
2077 2077
2078 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) 2078 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2079 *data_dst++ = *data_src++; 2079 *data_dst++ = *data_src++;
2080 2080
2081 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); 2081 } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
2082} 2082}
2083 2083
2084static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2084static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b9a5fb6400d3..a7d11f5565d6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1722,9 +1722,6 @@ static const struct net_device_ops bcm_enet_ops = {
1722 .ndo_set_rx_mode = bcm_enet_set_multicast_list, 1722 .ndo_set_rx_mode = bcm_enet_set_multicast_list,
1723 .ndo_do_ioctl = bcm_enet_ioctl, 1723 .ndo_do_ioctl = bcm_enet_ioctl,
1724 .ndo_change_mtu = bcm_enet_change_mtu, 1724 .ndo_change_mtu = bcm_enet_change_mtu,
1725#ifdef CONFIG_NET_POLL_CONTROLLER
1726 .ndo_poll_controller = bcm_enet_netpoll,
1727#endif
1728}; 1725};
1729 1726
1730/* 1727/*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6c9e1c9bdeb8..a8efb18e42fa 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2886 sw_cons = BNX2_NEXT_TX_BD(sw_cons); 2886 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2887 2887
2888 tx_bytes += skb->len; 2888 tx_bytes += skb->len;
2889 dev_kfree_skb(skb); 2889 dev_kfree_skb_any(skb);
2890 tx_pkt++; 2890 tx_pkt++;
2891 if (tx_pkt == budget) 2891 if (tx_pkt == budget)
2892 break; 2892 break;
@@ -3133,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3133 struct l2_fhdr *rx_hdr; 3133 struct l2_fhdr *rx_hdr;
3134 int rx_pkt = 0, pg_ring_used = 0; 3134 int rx_pkt = 0, pg_ring_used = 0;
3135 3135
3136 if (budget <= 0)
3137 return rx_pkt;
3138
3136 hw_cons = bnx2_get_hw_rx_cons(bnapi); 3139 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3137 sw_cons = rxr->rx_cons; 3140 sw_cons = rxr->rx_cons;
3138 sw_prod = rxr->rx_prod; 3141 sw_prod = rxr->rx_prod;
@@ -6235,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp)
6235static void 6238static void
6236bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) 6239bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6237{ 6240{
6238 int i, total_vecs, rc; 6241 int i, total_vecs;
6239 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; 6242 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6240 struct net_device *dev = bp->dev; 6243 struct net_device *dev = bp->dev;
6241 const int len = sizeof(bp->irq_tbl[0].name); 6244 const int len = sizeof(bp->irq_tbl[0].name);
@@ -6258,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6258#ifdef BCM_CNIC 6261#ifdef BCM_CNIC
6259 total_vecs++; 6262 total_vecs++;
6260#endif 6263#endif
6261 rc = -ENOSPC; 6264 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6262 while (total_vecs >= BNX2_MIN_MSIX_VEC) { 6265 BNX2_MIN_MSIX_VEC, total_vecs);
6263 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); 6266 if (total_vecs < 0)
6264 if (rc <= 0)
6265 break;
6266 if (rc > 0)
6267 total_vecs = rc;
6268 }
6269
6270 if (rc != 0)
6271 return; 6267 return;
6272 6268
6273 msix_vecs = total_vecs; 6269 msix_vecs = total_vecs;
@@ -6640,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6640 6636
6641 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); 6637 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6642 if (dma_mapping_error(&bp->pdev->dev, mapping)) { 6638 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6643 dev_kfree_skb(skb); 6639 dev_kfree_skb_any(skb);
6644 return NETDEV_TX_OK; 6640 return NETDEV_TX_OK;
6645 } 6641 }
6646 6642
@@ -6733,7 +6729,7 @@ dma_error:
6733 PCI_DMA_TODEVICE); 6729 PCI_DMA_TODEVICE);
6734 } 6730 }
6735 6731
6736 dev_kfree_skb(skb); 6732 dev_kfree_skb_any(skb);
6737 return NETDEV_TX_OK; 6733 return NETDEV_TX_OK;
6738} 6734}
6739 6735
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 391f29ef6d2e..4d8f8aba0ea5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
26 * (you will need to reboot afterwards) */ 26 * (you will need to reboot afterwards) */
27/* #define BNX2X_STOP_ON_ERROR */ 27/* #define BNX2X_STOP_ON_ERROR */
28 28
29#define DRV_MODULE_VERSION "1.78.17-0" 29#define DRV_MODULE_VERSION "1.78.19-0"
30#define DRV_MODULE_RELDATE "2013/04/11" 30#define DRV_MODULE_RELDATE "2014/02/10"
31#define BNX2X_BC_VER 0x040200 31#define BNX2X_BC_VER 0x040200
32 32
33#if defined(CONFIG_DCB) 33#if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
75#define BNX2X_MSG_DCB 0x8000000 75#define BNX2X_MSG_DCB 0x8000000
76 76
77/* regular debug print */ 77/* regular debug print */
78#define DP_INNER(fmt, ...) \
79 pr_notice("[%s:%d(%s)]" fmt, \
80 __func__, __LINE__, \
81 bp->dev ? (bp->dev->name) : "?", \
82 ##__VA_ARGS__);
83
78#define DP(__mask, fmt, ...) \ 84#define DP(__mask, fmt, ...) \
79do { \ 85do { \
80 if (unlikely(bp->msg_enable & (__mask))) \ 86 if (unlikely(bp->msg_enable & (__mask))) \
81 pr_notice("[%s:%d(%s)]" fmt, \ 87 DP_INNER(fmt, ##__VA_ARGS__); \
82 __func__, __LINE__, \ 88} while (0)
83 bp->dev ? (bp->dev->name) : "?", \ 89
84 ##__VA_ARGS__); \ 90#define DP_AND(__mask, fmt, ...) \
91do { \
92 if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
93 DP_INNER(fmt, ##__VA_ARGS__); \
85} while (0) 94} while (0)
86 95
87#define DP_CONT(__mask, fmt, ...) \ 96#define DP_CONT(__mask, fmt, ...) \
@@ -1146,10 +1155,6 @@ struct bnx2x_port {
1146 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 1155 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
1147 1156
1148/* slow path */ 1157/* slow path */
1149
1150/* slow path work-queue */
1151extern struct workqueue_struct *bnx2x_wq;
1152
1153#define BNX2X_MAX_NUM_OF_VFS 64 1158#define BNX2X_MAX_NUM_OF_VFS 64
1154#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ 1159#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
1155#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) 1160#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
@@ -1261,6 +1266,7 @@ struct bnx2x_slowpath {
1261 union { 1266 union {
1262 struct client_init_ramrod_data init_data; 1267 struct client_init_ramrod_data init_data;
1263 struct client_update_ramrod_data update_data; 1268 struct client_update_ramrod_data update_data;
1269 struct tpa_update_ramrod_data tpa_data;
1264 } q_rdata; 1270 } q_rdata;
1265 1271
1266 union { 1272 union {
@@ -1392,7 +1398,7 @@ struct bnx2x_fw_stats_data {
1392}; 1398};
1393 1399
1394/* Public slow path states */ 1400/* Public slow path states */
1395enum { 1401enum sp_rtnl_flag {
1396 BNX2X_SP_RTNL_SETUP_TC, 1402 BNX2X_SP_RTNL_SETUP_TC,
1397 BNX2X_SP_RTNL_TX_TIMEOUT, 1403 BNX2X_SP_RTNL_TX_TIMEOUT,
1398 BNX2X_SP_RTNL_FAN_FAILURE, 1404 BNX2X_SP_RTNL_FAN_FAILURE,
@@ -1403,6 +1409,12 @@ enum {
1403 BNX2X_SP_RTNL_RX_MODE, 1409 BNX2X_SP_RTNL_RX_MODE,
1404 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1410 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1405 BNX2X_SP_RTNL_TX_STOP, 1411 BNX2X_SP_RTNL_TX_STOP,
1412 BNX2X_SP_RTNL_GET_DRV_VERSION,
1413};
1414
1415enum bnx2x_iov_flag {
1416 BNX2X_IOV_HANDLE_VF_MSG,
1417 BNX2X_IOV_HANDLE_FLR,
1406}; 1418};
1407 1419
1408struct bnx2x_prev_path_list { 1420struct bnx2x_prev_path_list {
@@ -1603,6 +1615,8 @@ struct bnx2x {
1603 int mrrs; 1615 int mrrs;
1604 1616
1605 struct delayed_work sp_task; 1617 struct delayed_work sp_task;
1618 struct delayed_work iov_task;
1619
1606 atomic_t interrupt_occurred; 1620 atomic_t interrupt_occurred;
1607 struct delayed_work sp_rtnl_task; 1621 struct delayed_work sp_rtnl_task;
1608 1622
@@ -1693,6 +1707,10 @@ struct bnx2x {
1693 struct bnx2x_slowpath *slowpath; 1707 struct bnx2x_slowpath *slowpath;
1694 dma_addr_t slowpath_mapping; 1708 dma_addr_t slowpath_mapping;
1695 1709
1710 /* Mechanism protecting the drv_info_to_mcp */
1711 struct mutex drv_info_mutex;
1712 bool drv_info_mng_owner;
1713
1696 /* Total number of FW statistics requests */ 1714 /* Total number of FW statistics requests */
1697 u8 fw_stats_num; 1715 u8 fw_stats_num;
1698 1716
@@ -1882,6 +1900,9 @@ struct bnx2x {
1882 /* operation indication for the sp_rtnl task */ 1900 /* operation indication for the sp_rtnl task */
1883 unsigned long sp_rtnl_state; 1901 unsigned long sp_rtnl_state;
1884 1902
1903 /* Indication of the IOV tasks */
1904 unsigned long iov_task_state;
1905
1885 /* DCBX Negotiation results */ 1906 /* DCBX Negotiation results */
1886 struct dcbx_features dcbx_local_feat; 1907 struct dcbx_features dcbx_local_feat;
1887 u32 dcbx_error; 1908 u32 dcbx_error;
@@ -2525,6 +2546,8 @@ enum {
2525 2546
2526void bnx2x_set_local_cmng(struct bnx2x *bp); 2547void bnx2x_set_local_cmng(struct bnx2x *bp);
2527 2548
2549void bnx2x_update_mng_version(struct bnx2x *bp);
2550
2528#define MCPR_SCRATCH_BASE(bp) \ 2551#define MCPR_SCRATCH_BASE(bp) \
2529 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2552 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2530 2553
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dbcff509dc3f..9261d5313b5b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
61 61
62static int bnx2x_calc_num_queues(struct bnx2x *bp) 62static int bnx2x_calc_num_queues(struct bnx2x *bp)
63{ 63{
64 return bnx2x_num_queues ? 64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
65 min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) : 65
66 min_t(int, netif_get_num_default_rss_queues(), 66 /* Reduce memory usage in kdump environment by using only one queue */
67 BNX2X_MAX_QUEUES(bp)); 67 if (reset_devices)
68 nq = 1;
69
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
71 return nq;
68} 72}
69 73
70/** 74/**
@@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
868 if (unlikely(bp->panic)) 872 if (unlikely(bp->panic))
869 return 0; 873 return 0;
870#endif 874#endif
875 if (budget <= 0)
876 return rx_pkt;
871 877
872 bd_cons = fp->rx_bd_cons; 878 bd_cons = fp->rx_bd_cons;
873 bd_prod = fp->rx_bd_prod; 879 bd_prod = fp->rx_bd_prod;
@@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1638 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n", 1644 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1639 msix_vec); 1645 msix_vec);
1640 1646
1641 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec); 1647 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1642 1648 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1643 /* 1649 /*
1644 * reconfigure number of tx/rx queues according to available 1650 * reconfigure number of tx/rx queues according to available
1645 * MSI-X vectors 1651 * MSI-X vectors
1646 */ 1652 */
1647 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) { 1653 if (rc == -ENOSPC) {
1648 /* how less vectors we will have? */
1649 int diff = msix_vec - rc;
1650
1651 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1652
1653 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1654
1655 if (rc) {
1656 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1657 goto no_msix;
1658 }
1659 /*
1660 * decrease number of queues by number of unallocated entries
1661 */
1662 bp->num_ethernet_queues -= diff;
1663 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1664
1665 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1666 bp->num_queues);
1667 } else if (rc > 0) {
1668 /* Get by with single vector */ 1654 /* Get by with single vector */
1669 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1); 1655 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1670 if (rc) { 1656 if (rc < 0) {
1671 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", 1657 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1672 rc); 1658 rc);
1673 goto no_msix; 1659 goto no_msix;
@@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1680 bp->num_ethernet_queues = 1; 1666 bp->num_ethernet_queues = 1;
1681 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 1667 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1682 } else if (rc < 0) { 1668 } else if (rc < 0) {
1683 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1669 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1684 goto no_msix; 1670 goto no_msix;
1671 } else if (rc < msix_vec) {
1672 /* how less vectors we will have? */
1673 int diff = msix_vec - rc;
1674
1675 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1676
1677 /*
1678 * decrease number of queues by number of unallocated entries
1679 */
1680 bp->num_ethernet_queues -= diff;
1681 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1682
1683 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1684 bp->num_queues);
1685 } 1685 }
1686 1686
1687 bp->flags |= USING_MSIX_FLAG; 1687 bp->flags |= USING_MSIX_FLAG;
@@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2234 sizeof(struct per_queue_stats) * num_queue_stats + 2234 sizeof(struct per_queue_stats) * num_queue_stats +
2235 sizeof(struct stats_counter); 2235 sizeof(struct stats_counter);
2236 2236
2237 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, 2237 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2238 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 2238 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2239 if (!bp->fw_stats)
2240 goto alloc_mem_err;
2239 2241
2240 /* Set shortcuts */ 2242 /* Set shortcuts */
2241 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; 2243 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -2802,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2802 if (CNIC_ENABLED(bp)) 2804 if (CNIC_ENABLED(bp))
2803 bnx2x_load_cnic(bp); 2805 bnx2x_load_cnic(bp);
2804 2806
2807 if (IS_PF(bp))
2808 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2809
2805 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 2810 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2806 /* mark driver is loaded in shmem2 */ 2811 /* mark driver is loaded in shmem2 */
2807 u32 val; 2812 u32 val;
@@ -3028,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3028 bp->state = BNX2X_STATE_CLOSED; 3033 bp->state = BNX2X_STATE_CLOSED;
3029 bp->cnic_loaded = false; 3034 bp->cnic_loaded = false;
3030 3035
3036 /* Clear driver version indication in shmem */
3037 if (IS_PF(bp))
3038 bnx2x_update_mng_version(bp);
3039
3031 /* Check if there are pending parity attentions. If there are - set 3040 /* Check if there are pending parity attentions. If there are - set
3032 * RECOVERY_IN_PROGRESS. 3041 * RECOVERY_IN_PROGRESS.
3033 */ 3042 */
@@ -4370,14 +4379,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4370 4379
4371 if (!IS_FCOE_IDX(index)) { 4380 if (!IS_FCOE_IDX(index)) {
4372 /* status blocks */ 4381 /* status blocks */
4373 if (!CHIP_IS_E1x(bp)) 4382 if (!CHIP_IS_E1x(bp)) {
4374 BNX2X_PCI_ALLOC(sb->e2_sb, 4383 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4375 &bnx2x_fp(bp, index, status_blk_mapping), 4384 sizeof(struct host_hc_status_block_e2));
4376 sizeof(struct host_hc_status_block_e2)); 4385 if (!sb->e2_sb)
4377 else 4386 goto alloc_mem_err;
4378 BNX2X_PCI_ALLOC(sb->e1x_sb, 4387 } else {
4379 &bnx2x_fp(bp, index, status_blk_mapping), 4388 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4380 sizeof(struct host_hc_status_block_e1x)); 4389 sizeof(struct host_hc_status_block_e1x));
4390 if (!sb->e1x_sb)
4391 goto alloc_mem_err;
4392 }
4381 } 4393 }
4382 4394
4383 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 4395 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4396,35 +4408,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4396 "allocating tx memory of fp %d cos %d\n", 4408 "allocating tx memory of fp %d cos %d\n",
4397 index, cos); 4409 index, cos);
4398 4410
4399 BNX2X_ALLOC(txdata->tx_buf_ring, 4411 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4400 sizeof(struct sw_tx_bd) * NUM_TX_BD); 4412 sizeof(struct sw_tx_bd),
4401 BNX2X_PCI_ALLOC(txdata->tx_desc_ring, 4413 GFP_KERNEL);
4402 &txdata->tx_desc_mapping, 4414 if (!txdata->tx_buf_ring)
4403 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 4415 goto alloc_mem_err;
4416 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4417 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4418 if (!txdata->tx_desc_ring)
4419 goto alloc_mem_err;
4404 } 4420 }
4405 } 4421 }
4406 4422
4407 /* Rx */ 4423 /* Rx */
4408 if (!skip_rx_queue(bp, index)) { 4424 if (!skip_rx_queue(bp, index)) {
4409 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 4425 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4410 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), 4426 bnx2x_fp(bp, index, rx_buf_ring) =
4411 sizeof(struct sw_rx_bd) * NUM_RX_BD); 4427 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4412 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), 4428 if (!bnx2x_fp(bp, index, rx_buf_ring))
4413 &bnx2x_fp(bp, index, rx_desc_mapping), 4429 goto alloc_mem_err;
4414 sizeof(struct eth_rx_bd) * NUM_RX_BD); 4430 bnx2x_fp(bp, index, rx_desc_ring) =
4431 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4432 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4433 if (!bnx2x_fp(bp, index, rx_desc_ring))
4434 goto alloc_mem_err;
4415 4435
4416 /* Seed all CQEs by 1s */ 4436 /* Seed all CQEs by 1s */
4417 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), 4437 bnx2x_fp(bp, index, rx_comp_ring) =
4418 &bnx2x_fp(bp, index, rx_comp_mapping), 4438 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4419 sizeof(struct eth_fast_path_rx_cqe) * 4439 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4420 NUM_RCQ_BD); 4440 if (!bnx2x_fp(bp, index, rx_comp_ring))
4441 goto alloc_mem_err;
4421 4442
4422 /* SGE ring */ 4443 /* SGE ring */
4423 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), 4444 bnx2x_fp(bp, index, rx_page_ring) =
4424 sizeof(struct sw_rx_page) * NUM_RX_SGE); 4445 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4425 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), 4446 GFP_KERNEL);
4426 &bnx2x_fp(bp, index, rx_sge_mapping), 4447 if (!bnx2x_fp(bp, index, rx_page_ring))
4427 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 4448 goto alloc_mem_err;
4449 bnx2x_fp(bp, index, rx_sge_ring) =
4450 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4451 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4452 if (!bnx2x_fp(bp, index, rx_sge_ring))
4453 goto alloc_mem_err;
4428 /* RX BD ring */ 4454 /* RX BD ring */
4429 bnx2x_set_next_page_rx_bd(fp); 4455 bnx2x_set_next_page_rx_bd(fp);
4430 4456
@@ -4780,12 +4806,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
4780 bnx2x_panic(); 4806 bnx2x_panic();
4781#endif 4807#endif
4782 4808
4783 smp_mb__before_clear_bit();
4784 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4785 smp_mb__after_clear_bit();
4786
4787 /* This allows the netif to be shutdown gracefully before resetting */ 4809 /* This allows the netif to be shutdown gracefully before resetting */
4788 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4810 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4789} 4811}
4790 4812
4791int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 4813int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4913,3 +4935,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4913 disable = disable ? 1 : (usec ? 0 : 1); 4935 disable = disable ? 1 : (usec ? 0 : 1);
4914 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); 4936 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4915} 4937}
4938
4939void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4940 u32 verbose)
4941{
4942 smp_mb__before_clear_bit();
4943 set_bit(flag, &bp->sp_rtnl_state);
4944 smp_mb__after_clear_bit();
4945 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4946 flag);
4947 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4948}
4949EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f88c25..05f4f5f52635 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -47,31 +47,26 @@ extern int bnx2x_num_queues;
47 } \ 47 } \
48 } while (0) 48 } while (0)
49 49
50#define BNX2X_PCI_ALLOC(x, y, size) \ 50#define BNX2X_PCI_ALLOC(y, size) \
51 do { \ 51({ \
52 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 52 void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
53 if (x == NULL) \ 53 if (x) \
54 goto alloc_mem_err; \ 54 DP(NETIF_MSG_HW, \
55 DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ 55 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
56 (unsigned long long)(*y), x); \ 56 (unsigned long long)(*y), x); \
57 } while (0) 57 x; \
58 58})
59#define BNX2X_PCI_FALLOC(x, y, size) \ 59#define BNX2X_PCI_FALLOC(y, size) \
60 do { \ 60({ \
61 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 61 void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
62 if (x == NULL) \ 62 if (x) { \
63 goto alloc_mem_err; \ 63 memset(x, 0xff, size); \
64 memset((void *)x, 0xFFFFFFFF, size); \ 64 DP(NETIF_MSG_HW, \
65 DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ 65 "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
66 (unsigned long long)(*y), x); \ 66 (unsigned long long)(*y), x); \
67 } while (0) 67 } \
68 68 x; \
69#define BNX2X_ALLOC(x, size) \ 69})
70 do { \
71 x = kzalloc(size, GFP_KERNEL); \
72 if (x == NULL) \
73 goto alloc_mem_err; \
74 } while (0)
75 70
76/*********************** Interfaces **************************** 71/*********************** Interfaces ****************************
77 * Functions that need to be implemented by each driver version 72 * Functions that need to be implemented by each driver version
@@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
1324int bnx2x_drain_tx_queues(struct bnx2x *bp); 1319int bnx2x_drain_tx_queues(struct bnx2x *bp);
1325void bnx2x_squeeze_objects(struct bnx2x *bp); 1320void bnx2x_squeeze_objects(struct bnx2x *bp);
1326 1321
1322void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
1323 u32 verbose);
1324
1327#endif /* BNX2X_CMN_H */ 1325#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fdace204b054..97ea5421dd96 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
710 * as we are handling an attention on a work queue which must be 710 * as we are handling an attention on a work queue which must be
711 * flushed at some rtnl-locked contexts (e.g. if down) 711 * flushed at some rtnl-locked contexts (e.g. if down)
712 */ 712 */
713 if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 713 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
714 schedule_delayed_work(&bp->sp_rtnl_task, 0);
715} 714}
716 715
717void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) 716void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
764 if (IS_MF(bp)) 763 if (IS_MF(bp))
765 bnx2x_link_sync_notify(bp); 764 bnx2x_link_sync_notify(bp);
766 765
767 set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); 766 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
768
769 schedule_delayed_work(&bp->sp_rtnl_task, 0);
770
771 return; 767 return;
772 } 768 }
773 case BNX2X_DCBX_STATE_TX_PAUSED: 769 case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 38fc794c1655..b6de05e3149b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
2969#define IS_PORT_STAT(i) \ 2969#define IS_PORT_STAT(i) \
2970 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) 2970 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
2971#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) 2971#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
2972#define IS_MF_MODE_STAT(bp) \ 2972#define HIDE_PORT_STAT(bp) \
2973 (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) 2973 ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
2974 IS_VF(bp))
2974 2975
2975/* ethtool statistics are displayed for all regular ethernet queues and the 2976/* ethtool statistics are displayed for all regular ethernet queues and the
2976 * fcoe L2 queue if not disabled 2977 * fcoe L2 queue if not disabled
@@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2992 BNX2X_NUM_Q_STATS; 2993 BNX2X_NUM_Q_STATS;
2993 } else 2994 } else
2994 num_strings = 0; 2995 num_strings = 0;
2995 if (IS_MF_MODE_STAT(bp)) { 2996 if (HIDE_PORT_STAT(bp)) {
2996 for (i = 0; i < BNX2X_NUM_STATS; i++) 2997 for (i = 0; i < BNX2X_NUM_STATS; i++)
2997 if (IS_FUNC_STAT(i)) 2998 if (IS_FUNC_STAT(i))
2998 num_strings++; 2999 num_strings++;
@@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3047 } 3048 }
3048 3049
3049 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 3050 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3050 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 3051 if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
3051 continue; 3052 continue;
3052 strcpy(buf + (k + j)*ETH_GSTRING_LEN, 3053 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
3053 bnx2x_stats_arr[i].string); 3054 bnx2x_stats_arr[i].string);
@@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
3105 3106
3106 hw_stats = (u32 *)&bp->eth_stats; 3107 hw_stats = (u32 *)&bp->eth_stats;
3107 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 3108 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3108 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 3109 if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
3109 continue; 3110 continue;
3110 if (bnx2x_stats_arr[i].size == 0) { 3111 if (bnx2x_stats_arr[i].size == 0) {
3111 /* skip this counter */ 3112 /* skip this counter */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf06f7a..95dc36543548 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
87 (IRO[156].base + ((vfId) * IRO[156].m1)) 87 (IRO[156].base + ((vfId) * IRO[156].m1))
88#define CSTORM_VF_TO_PF_OFFSET(funcId) \ 88#define CSTORM_VF_TO_PF_OFFSET(funcId) \
89 (IRO[150].base + ((funcId) * IRO[150].m1)) 89 (IRO[150].base + ((funcId) * IRO[150].m1))
90#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
91#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ 90#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
92 (IRO[203].base + ((pfId) * IRO[203].m1)) 91 (IRO[203].base + ((pfId) * IRO[203].m1))
93#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) 92#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index cf1df8b62e2c..5ba8af50c84f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2003,6 +2003,23 @@ struct shmem_lfa {
2003 #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) 2003 #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
2004}; 2004};
2005 2005
 2006/* Used to support NCSI get OS driver version.
 2007 * On driver load the version value will be set;
 2008 * on driver unload a value of 0x0 will be set.
 2009 */
2010struct os_drv_ver {
2011#define DRV_VER_NOT_LOADED 0
2012
 2013 /* personalities order is important */
2014#define DRV_PERS_ETHERNET 0
2015#define DRV_PERS_ISCSI 1
2016#define DRV_PERS_FCOE 2
2017
 2018 /* shmem2 struct is constant; can't add more personalities here */
2019#define MAX_DRV_PERS 3
2020 u32 versions[MAX_DRV_PERS];
2021};
2022
2006struct ncsi_oem_fcoe_features { 2023struct ncsi_oem_fcoe_features {
2007 u32 fcoe_features1; 2024 u32 fcoe_features1;
2008 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF 2025 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2217,6 +2234,18 @@ struct shmem2_region {
2217 u32 reserved4; /* Offset 0x150 */ 2234 u32 reserved4; /* Offset 0x150 */
2218 u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ 2235 u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
2219 #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) 2236 #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
2237
2238 u32 reserved5[2];
2239 u32 reserved6[PORT_MAX];
2240
2241 /* driver version for each personality */
2242 struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
2243
2244 /* Flag to the driver that PF's drv_info_host_addr buffer was read */
2245 u32 mfw_drv_indication;
2246
2247 /* We use indication for each PF (0..3) */
2248#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
2220}; 2249};
2221 2250
2222 2251
@@ -2848,7 +2877,7 @@ struct afex_stats {
2848 2877
2849#define BCM_5710_FW_MAJOR_VERSION 7 2878#define BCM_5710_FW_MAJOR_VERSION 7
2850#define BCM_5710_FW_MINOR_VERSION 8 2879#define BCM_5710_FW_MINOR_VERSION 8
2851#define BCM_5710_FW_REVISION_VERSION 17 2880#define BCM_5710_FW_REVISION_VERSION 19
2852#define BCM_5710_FW_ENGINEERING_VERSION 0 2881#define BCM_5710_FW_ENGINEERING_VERSION 0
2853#define BCM_5710_FW_COMPILE_FLAGS 1 2882#define BCM_5710_FW_COMPILE_FLAGS 1
2854 2883
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7d4382286457..a78edaccceee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -120,7 +120,8 @@ static int debug;
120module_param(debug, int, S_IRUGO); 120module_param(debug, int, S_IRUGO);
121MODULE_PARM_DESC(debug, " Default debug msglevel"); 121MODULE_PARM_DESC(debug, " Default debug msglevel");
122 122
123struct workqueue_struct *bnx2x_wq; 123static struct workqueue_struct *bnx2x_wq;
124struct workqueue_struct *bnx2x_iov_wq;
124 125
125struct bnx2x_mac_vals { 126struct bnx2x_mac_vals {
126 u32 xmac_addr; 127 u32 xmac_addr;
@@ -918,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
918 u16 start = 0, end = 0; 919 u16 start = 0, end = 0;
919 u8 cos; 920 u8 cos;
920#endif 921#endif
921 if (disable_int) 922 if (IS_PF(bp) && disable_int)
922 bnx2x_int_disable(bp); 923 bnx2x_int_disable(bp);
923 924
924 bp->stats_state = STATS_STATE_DISABLED; 925 bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
929 930
930 /* Indices */ 931 /* Indices */
931 /* Common */ 932 /* Common */
932 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", 933 if (IS_PF(bp)) {
933 bp->def_idx, bp->def_att_idx, bp->attn_state, 934 struct host_sp_status_block *def_sb = bp->def_status_blk;
934 bp->spq_prod_idx, bp->stats_counter); 935 int data_size, cstorm_offset;
935 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", 936
936 bp->def_status_blk->atten_status_block.attn_bits, 937 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
937 bp->def_status_blk->atten_status_block.attn_bits_ack, 938 bp->def_idx, bp->def_att_idx, bp->attn_state,
938 bp->def_status_blk->atten_status_block.status_block_id, 939 bp->spq_prod_idx, bp->stats_counter);
939 bp->def_status_blk->atten_status_block.attn_bits_index); 940 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
940 BNX2X_ERR(" def ("); 941 def_sb->atten_status_block.attn_bits,
941 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) 942 def_sb->atten_status_block.attn_bits_ack,
942 pr_cont("0x%x%s", 943 def_sb->atten_status_block.status_block_id,
943 bp->def_status_blk->sp_sb.index_values[i], 944 def_sb->atten_status_block.attn_bits_index);
944 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); 945 BNX2X_ERR(" def (");
945 946 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
946 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 947 pr_cont("0x%x%s",
947 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + 948 def_sb->sp_sb.index_values[i],
948 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 949 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
949 i*sizeof(u32)); 950
950 951 data_size = sizeof(struct hc_sp_status_block_data) /
951 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", 952 sizeof(u32);
952 sp_sb_data.igu_sb_id, 953 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
953 sp_sb_data.igu_seg_id, 954 for (i = 0; i < data_size; i++)
954 sp_sb_data.p_func.pf_id, 955 *((u32 *)&sp_sb_data + i) =
955 sp_sb_data.p_func.vnic_id, 956 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
956 sp_sb_data.p_func.vf_id, 957 i * sizeof(u32));
957 sp_sb_data.p_func.vf_valid, 958
958 sp_sb_data.state); 959 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
960 sp_sb_data.igu_sb_id,
961 sp_sb_data.igu_seg_id,
962 sp_sb_data.p_func.pf_id,
963 sp_sb_data.p_func.vnic_id,
964 sp_sb_data.p_func.vf_id,
965 sp_sb_data.p_func.vf_valid,
966 sp_sb_data.state);
967 }
959 968
960 for_each_eth_queue(bp, i) { 969 for_each_eth_queue(bp, i) {
961 struct bnx2x_fastpath *fp = &bp->fp[i]; 970 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1013 pr_cont("0x%x%s", 1022 pr_cont("0x%x%s",
1014 fp->sb_index_values[j], 1023 fp->sb_index_values[j],
1015 (j == loop - 1) ? ")" : " "); 1024 (j == loop - 1) ? ")" : " ");
1025
1026 /* VF cannot access FW refelection for status block */
1027 if (IS_VF(bp))
1028 continue;
1029
1016 /* fw sb data */ 1030 /* fw sb data */
1017 data_size = CHIP_IS_E1x(bp) ? 1031 data_size = CHIP_IS_E1x(bp) ?
1018 sizeof(struct hc_status_block_data_e1x) : 1032 sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1064 } 1078 }
1065 1079
1066#ifdef BNX2X_STOP_ON_ERROR 1080#ifdef BNX2X_STOP_ON_ERROR
1067 1081 if (IS_PF(bp)) {
1068 /* event queue */ 1082 /* event queue */
1069 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); 1083 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1070 for (i = 0; i < NUM_EQ_DESC; i++) { 1084 for (i = 0; i < NUM_EQ_DESC; i++) {
1071 u32 *data = (u32 *)&bp->eq_ring[i].message.data; 1085 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1072 1086
1073 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", 1087 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1074 i, bp->eq_ring[i].message.opcode, 1088 i, bp->eq_ring[i].message.opcode,
1075 bp->eq_ring[i].message.error); 1089 bp->eq_ring[i].message.error);
1076 BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); 1090 BNX2X_ERR("data: %x %x %x\n",
1091 data[0], data[1], data[2]);
1092 }
1077 } 1093 }
1078 1094
1079 /* Rings */ 1095 /* Rings */
@@ -1140,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1140 } 1156 }
1141 } 1157 }
1142#endif 1158#endif
1143 bnx2x_fw_dump(bp); 1159 if (IS_PF(bp)) {
1144 bnx2x_mc_assert(bp); 1160 bnx2x_fw_dump(bp);
1161 bnx2x_mc_assert(bp);
1162 }
1145 BNX2X_ERR("end crash dump -----------------\n"); 1163 BNX2X_ERR("end crash dump -----------------\n");
1146} 1164}
1147 1165
@@ -1814,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1814 drv_cmd = BNX2X_Q_CMD_EMPTY; 1832 drv_cmd = BNX2X_Q_CMD_EMPTY;
1815 break; 1833 break;
1816 1834
1835 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1836 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1837 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1838 break;
1839
1817 default: 1840 default:
1818 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", 1841 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1819 command, fp->index); 1842 command, fp->index);
@@ -1834,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1834#else 1857#else
1835 return; 1858 return;
1836#endif 1859#endif
1837 /* SRIOV: reschedule any 'in_progress' operations */
1838 bnx2x_iov_sp_event(bp, cid, true);
1839 1860
1840 smp_mb__before_atomic_inc(); 1861 smp_mb__before_atomic_inc();
1841 atomic_inc(&bp->cq_spq_left); 1862 atomic_inc(&bp->cq_spq_left);
@@ -3460,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
3460 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3481 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3461} 3482}
3462 3483
3484#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3485#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3486
3463static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3487static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3464{ 3488{
3465 enum drv_info_opcode op_code; 3489 enum drv_info_opcode op_code;
3466 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3490 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3491 bool release = false;
3492 int wait;
3467 3493
3468 /* if drv_info version supported by MFW doesn't match - send NACK */ 3494 /* if drv_info version supported by MFW doesn't match - send NACK */
3469 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3495 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3474,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3474 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3500 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3475 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3501 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3476 3502
3503 /* Must prevent other flows from accessing drv_info_to_mcp */
3504 mutex_lock(&bp->drv_info_mutex);
3505
3477 memset(&bp->slowpath->drv_info_to_mcp, 0, 3506 memset(&bp->slowpath->drv_info_to_mcp, 0,
3478 sizeof(union drv_info_to_mcp)); 3507 sizeof(union drv_info_to_mcp));
3479 3508
@@ -3490,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3490 default: 3519 default:
3491 /* if op code isn't supported - send NACK */ 3520 /* if op code isn't supported - send NACK */
3492 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3521 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3493 return; 3522 goto out;
3494 } 3523 }
3495 3524
3496 /* if we got drv_info attn from MFW then these fields are defined in 3525 /* if we got drv_info attn from MFW then these fields are defined in
@@ -3502,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3502 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3531 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3503 3532
3504 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3533 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3534
 3535 /* Since it's possible that management wants both this and
 3536 * get_driver_version, we need to wait until management notifies
 3537 * us that it has finished utilizing the buffer.
 3538 */
3539 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3540 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3541 } else if (!bp->drv_info_mng_owner) {
3542 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3543
3544 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3545 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3546
3547 /* Management is done; need to clear indication */
3548 if (indication & bit) {
3549 SHMEM2_WR(bp, mfw_drv_indication,
3550 indication & ~bit);
3551 release = true;
3552 break;
3553 }
3554
3555 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3556 }
3557 }
3558 if (!release) {
3559 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3560 bp->drv_info_mng_owner = true;
3561 }
3562
3563out:
3564 mutex_unlock(&bp->drv_info_mutex);
3565}
3566
3567static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3568{
3569 u8 vals[4];
3570 int i = 0;
3571
3572 if (bnx2x_format) {
3573 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3574 &vals[0], &vals[1], &vals[2], &vals[3]);
3575 if (i > 0)
3576 vals[0] -= '0';
3577 } else {
3578 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3579 &vals[0], &vals[1], &vals[2], &vals[3]);
3580 }
3581
3582 while (i < 4)
3583 vals[i++] = 0;
3584
3585 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3586}
3587
3588void bnx2x_update_mng_version(struct bnx2x *bp)
3589{
3590 u32 iscsiver = DRV_VER_NOT_LOADED;
3591 u32 fcoever = DRV_VER_NOT_LOADED;
3592 u32 ethver = DRV_VER_NOT_LOADED;
3593 int idx = BP_FW_MB_IDX(bp);
3594 u8 *version;
3595
3596 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3597 return;
3598
3599 mutex_lock(&bp->drv_info_mutex);
3600 /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
3601 if (bp->drv_info_mng_owner)
3602 goto out;
3603
3604 if (bp->state != BNX2X_STATE_OPEN)
3605 goto out;
3606
3607 /* Parse ethernet driver version */
3608 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3609 if (!CNIC_LOADED(bp))
3610 goto out;
3611
3612 /* Try getting storage driver version via cnic */
3613 memset(&bp->slowpath->drv_info_to_mcp, 0,
3614 sizeof(union drv_info_to_mcp));
3615 bnx2x_drv_info_iscsi_stat(bp);
3616 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3617 iscsiver = bnx2x_update_mng_version_utility(version, false);
3618
3619 memset(&bp->slowpath->drv_info_to_mcp, 0,
3620 sizeof(union drv_info_to_mcp));
3621 bnx2x_drv_info_fcoe_stat(bp);
3622 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3623 fcoever = bnx2x_update_mng_version_utility(version, false);
3624
3625out:
3626 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3627 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3628 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3629
3630 mutex_unlock(&bp->drv_info_mutex);
3631
3632 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3633 ethver, iscsiver, fcoever);
3505} 3634}
3506 3635
3507static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3636static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -3644,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3644 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 3773 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3645 HW_CID(bp, cid)); 3774 HW_CID(bp, cid));
3646 3775
3647 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 3776 /* In some cases, type may already contain the func-id
3648 3777 * mainly in SRIOV related use cases, so we add it here only
3649 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & 3778 * if it's not already set.
3650 SPE_HDR_FUNCTION_ID); 3779 */
3780 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3781 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3782 SPE_HDR_CONN_TYPE;
3783 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3784 SPE_HDR_FUNCTION_ID);
3785 } else {
3786 type = cmd_type;
3787 }
3651 3788
3652 spe->hdr.type = cpu_to_le16(type); 3789 spe->hdr.type = cpu_to_le16(type);
3653 3790
@@ -3878,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
3878 * This is due to some boards consuming sufficient power when driver is 4015 * This is due to some boards consuming sufficient power when driver is
3879 * up to overheat if fan fails. 4016 * up to overheat if fan fails.
3880 */ 4017 */
3881 smp_mb__before_clear_bit(); 4018 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
3882 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3883 smp_mb__after_clear_bit();
3884 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3885} 4019}
3886 4020
3887static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 4021static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4025,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4025 bnx2x_handle_drv_info_req(bp); 4159 bnx2x_handle_drv_info_req(bp);
4026 4160
4027 if (val & DRV_STATUS_VF_DISABLED) 4161 if (val & DRV_STATUS_VF_DISABLED)
4028 bnx2x_vf_handle_flr_event(bp); 4162 bnx2x_schedule_iov_task(bp,
4163 BNX2X_IOV_HANDLE_FLR);
4029 4164
4030 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 4165 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4031 bnx2x_pmf_update(bp); 4166 bnx2x_pmf_update(bp);
@@ -5216,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5216 /* handle eq element */ 5351 /* handle eq element */
5217 switch (opcode) { 5352 switch (opcode) {
5218 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 5353 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5219 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); 5354 bnx2x_vf_mbx_schedule(bp,
5220 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); 5355 &elem->message.data.vf_pf_event);
5221 continue; 5356 continue;
5222 5357
5223 case EVENT_RING_OPCODE_STAT_QUERY: 5358 case EVENT_RING_OPCODE_STAT_QUERY:
5224 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 5359 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5225 "got statistics comp event %d\n", 5360 "got statistics comp event %d\n",
5226 bp->stats_comp++); 5361 bp->stats_comp++);
5227 /* nothing to do with stats comp */ 5362 /* nothing to do with stats comp */
5228 goto next_spqe; 5363 goto next_spqe;
5229 5364
@@ -5273,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5273 break; 5408 break;
5274 5409
5275 } else { 5410 } else {
5411 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5412
5276 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5413 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5277 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5414 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5278 f_obj->complete_cmd(bp, f_obj, 5415 f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5282 * sp_rtnl task as all Queue SP operations 5419 * sp_rtnl task as all Queue SP operations
5283 * should run under rtnl_lock. 5420 * should run under rtnl_lock.
5284 */ 5421 */
5285 smp_mb__before_clear_bit(); 5422 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5286 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5287 &bp->sp_rtnl_state);
5288 smp_mb__after_clear_bit();
5289
5290 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5291 } 5423 }
5292 5424
5293 goto next_spqe; 5425 goto next_spqe;
@@ -5435,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
5435 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5567 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5436 } 5568 }
5437 5569
5438 /* must be called after the EQ processing (since eq leads to sriov
5439 * ramrod completion flows).
5440 * This flow may have been scheduled by the arrival of a ramrod
5441 * completion, or by the sriov code rescheduling itself.
5442 */
5443 bnx2x_iov_sp_task(bp);
5444
5445 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5570 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5446 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5571 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5447 &bp->sp_state)) { 5572 &bp->sp_state)) {
@@ -6005,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
6005{ 6130{
6006 int i; 6131 int i;
6007 6132
6008 if (IS_MF_SI(bp))
6009 /*
6010 * In switch independent mode, the TSTORM needs to accept
6011 * packets that failed classification, since approximate match
6012 * mac addresses aren't written to NIG LLH
6013 */
6014 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6015 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
6016 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
6017 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6018 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
6019
6020 /* Zero this manually as its initialization is 6133 /* Zero this manually as its initialization is
6021 currently missing in the initTool */ 6134 currently missing in the initTool */
6022 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 6135 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -7989,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
7989 8102
7990int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 8103int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7991{ 8104{
7992 if (!CHIP_IS_E1x(bp)) 8105 if (!CHIP_IS_E1x(bp)) {
7993 /* size = the status block + ramrod buffers */ 8106 /* size = the status block + ramrod buffers */
7994 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 8107 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
7995 sizeof(struct host_hc_status_block_e2)); 8108 sizeof(struct host_hc_status_block_e2));
7996 else 8109 if (!bp->cnic_sb.e2_sb)
7997 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, 8110 goto alloc_mem_err;
7998 &bp->cnic_sb_mapping, 8111 } else {
7999 sizeof(struct 8112 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8000 host_hc_status_block_e1x)); 8113 sizeof(struct host_hc_status_block_e1x));
8114 if (!bp->cnic_sb.e1x_sb)
8115 goto alloc_mem_err;
8116 }
8001 8117
8002 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) 8118 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8003 /* allocate searcher T2 table, as it wasn't allocated before */ 8119 /* allocate searcher T2 table, as it wasn't allocated before */
8004 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 8120 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8121 if (!bp->t2)
8122 goto alloc_mem_err;
8123 }
8005 8124
8006 /* write address to which L5 should insert its values */ 8125 /* write address to which L5 should insert its values */
8007 bp->cnic_eth_dev.addr_drv_info_to_mcp = 8126 bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -8022,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
8022{ 8141{
8023 int i, allocated, context_size; 8142 int i, allocated, context_size;
8024 8143
8025 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) 8144 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8026 /* allocate searcher T2 table */ 8145 /* allocate searcher T2 table */
8027 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 8146 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8147 if (!bp->t2)
8148 goto alloc_mem_err;
8149 }
8028 8150
8029 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 8151 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8030 sizeof(struct host_sp_status_block)); 8152 sizeof(struct host_sp_status_block));
8153 if (!bp->def_status_blk)
8154 goto alloc_mem_err;
8031 8155
8032 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 8156 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8033 sizeof(struct bnx2x_slowpath)); 8157 sizeof(struct bnx2x_slowpath));
8158 if (!bp->slowpath)
8159 goto alloc_mem_err;
8034 8160
8035 /* Allocate memory for CDU context: 8161 /* Allocate memory for CDU context:
8036 * This memory is allocated separately and not in the generic ILT 8162 * This memory is allocated separately and not in the generic ILT
@@ -8050,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
8050 for (i = 0, allocated = 0; allocated < context_size; i++) { 8176 for (i = 0, allocated = 0; allocated < context_size; i++) {
8051 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 8177 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8052 (context_size - allocated)); 8178 (context_size - allocated));
8053 BNX2X_PCI_ALLOC(bp->context[i].vcxt, 8179 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8054 &bp->context[i].cxt_mapping, 8180 bp->context[i].size);
8055 bp->context[i].size); 8181 if (!bp->context[i].vcxt)
8182 goto alloc_mem_err;
8056 allocated += bp->context[i].size; 8183 allocated += bp->context[i].size;
8057 } 8184 }
8058 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 8185 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8186 GFP_KERNEL);
8187 if (!bp->ilt->lines)
8188 goto alloc_mem_err;
8059 8189
8060 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 8190 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8061 goto alloc_mem_err; 8191 goto alloc_mem_err;
@@ -8064,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
8064 goto alloc_mem_err; 8194 goto alloc_mem_err;
8065 8195
8066 /* Slow path ring */ 8196 /* Slow path ring */
8067 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 8197 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8198 if (!bp->spq)
8199 goto alloc_mem_err;
8068 8200
8069 /* EQ */ 8201 /* EQ */
8070 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 8202 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8071 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8203 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8204 if (!bp->eq_ring)
8205 goto alloc_mem_err;
8072 8206
8073 return 0; 8207 return 0;
8074 8208
@@ -8849,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
8849 synchronize_irq(bp->pdev->irq); 8983 synchronize_irq(bp->pdev->irq);
8850 8984
8851 flush_workqueue(bnx2x_wq); 8985 flush_workqueue(bnx2x_wq);
8986 flush_workqueue(bnx2x_iov_wq);
8852 8987
8853 while (bnx2x_func_get_state(bp, &bp->func_obj) != 8988 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8854 BNX2X_F_STATE_STARTED && tout--) 8989 BNX2X_F_STATE_STARTED && tout--)
@@ -9774,6 +9909,10 @@ sp_rtnl_not_reset:
9774 bnx2x_dcbx_resume_hw_tx(bp); 9909 bnx2x_dcbx_resume_hw_tx(bp);
9775 } 9910 }
9776 9911
9912 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
9913 &bp->sp_rtnl_state))
9914 bnx2x_update_mng_version(bp);
9915
9777 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9916 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9778 * can be called from other contexts as well) 9917 * can be called from other contexts as well)
9779 */ 9918 */
@@ -11724,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11724 11863
11725 mutex_init(&bp->port.phy_mutex); 11864 mutex_init(&bp->port.phy_mutex);
11726 mutex_init(&bp->fw_mb_mutex); 11865 mutex_init(&bp->fw_mb_mutex);
11866 mutex_init(&bp->drv_info_mutex);
11867 bp->drv_info_mng_owner = false;
11727 spin_lock_init(&bp->stats_lock); 11868 spin_lock_init(&bp->stats_lock);
11728 sema_init(&bp->stats_sema, 1); 11869 sema_init(&bp->stats_sema, 1);
11729 11870
11730 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11871 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11731 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11872 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11732 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 11873 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11874 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
11733 if (IS_PF(bp)) { 11875 if (IS_PF(bp)) {
11734 rc = bnx2x_get_hwinfo(bp); 11876 rc = bnx2x_get_hwinfo(bp);
11735 if (rc) 11877 if (rc)
@@ -11771,6 +11913,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11771 11913
11772 bp->disable_tpa = disable_tpa; 11914 bp->disable_tpa = disable_tpa;
11773 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11915 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
11916 /* Reduce memory usage in kdump environment by disabling TPA */
11917 bp->disable_tpa |= reset_devices;
11774 11918
11775 /* Set TPA flags */ 11919 /* Set TPA flags */
11776 if (bp->disable_tpa) { 11920 if (bp->disable_tpa) {
@@ -11942,7 +12086,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11942{ 12086{
11943 int mc_count = netdev_mc_count(bp->dev); 12087 int mc_count = netdev_mc_count(bp->dev);
11944 struct bnx2x_mcast_list_elem *mc_mac = 12088 struct bnx2x_mcast_list_elem *mc_mac =
11945 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); 12089 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
11946 struct netdev_hw_addr *ha; 12090 struct netdev_hw_addr *ha;
11947 12091
11948 if (!mc_mac) 12092 if (!mc_mac)
@@ -12064,11 +12208,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
12064 return; 12208 return;
12065 } else { 12209 } else {
12066 /* Schedule an SP task to handle rest of change */ 12210 /* Schedule an SP task to handle rest of change */
12067 DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); 12211 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12068 smp_mb__before_clear_bit(); 12212 NETIF_MSG_IFUP);
12069 set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
12070 smp_mb__after_clear_bit();
12071 schedule_delayed_work(&bp->sp_rtnl_task, 0);
12072 } 12213 }
12073} 12214}
12074 12215
@@ -12101,11 +12242,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12101 /* configuring mcast to a vf involves sleeping (when we 12242 /* configuring mcast to a vf involves sleeping (when we
12102 * wait for the pf's response). 12243 * wait for the pf's response).
12103 */ 12244 */
12104 smp_mb__before_clear_bit(); 12245 bnx2x_schedule_sp_rtnl(bp,
12105 set_bit(BNX2X_SP_RTNL_VFPF_MCAST, 12246 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12106 &bp->sp_rtnl_state);
12107 smp_mb__after_clear_bit();
12108 schedule_delayed_work(&bp->sp_rtnl_task, 0);
12109 } 12247 }
12110 } 12248 }
12111 12249
@@ -13356,11 +13494,18 @@ static int __init bnx2x_init(void)
13356 pr_err("Cannot create workqueue\n"); 13494 pr_err("Cannot create workqueue\n");
13357 return -ENOMEM; 13495 return -ENOMEM;
13358 } 13496 }
13497 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
13498 if (!bnx2x_iov_wq) {
13499 pr_err("Cannot create iov workqueue\n");
13500 destroy_workqueue(bnx2x_wq);
13501 return -ENOMEM;
13502 }
13359 13503
13360 ret = pci_register_driver(&bnx2x_pci_driver); 13504 ret = pci_register_driver(&bnx2x_pci_driver);
13361 if (ret) { 13505 if (ret) {
13362 pr_err("Cannot register driver\n"); 13506 pr_err("Cannot register driver\n");
13363 destroy_workqueue(bnx2x_wq); 13507 destroy_workqueue(bnx2x_wq);
13508 destroy_workqueue(bnx2x_iov_wq);
13364 } 13509 }
13365 return ret; 13510 return ret;
13366} 13511}
@@ -13372,6 +13517,7 @@ static void __exit bnx2x_cleanup(void)
13372 pci_unregister_driver(&bnx2x_pci_driver); 13517 pci_unregister_driver(&bnx2x_pci_driver);
13373 13518
13374 destroy_workqueue(bnx2x_wq); 13519 destroy_workqueue(bnx2x_wq);
13520 destroy_workqueue(bnx2x_iov_wq);
13375 13521
13376 /* Free globally allocated resources */ 13522 /* Free globally allocated resources */
13377 list_for_each_safe(pos, q, &bnx2x_prev_list) { 13523 list_for_each_safe(pos, q, &bnx2x_prev_list) {
@@ -13765,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13765 REG_WR(bp, scratch_offset + i, 13911 REG_WR(bp, scratch_offset + i,
13766 *(host_addr + i/4)); 13912 *(host_addr + i/4));
13767 } 13913 }
13914 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
13768 break; 13915 break;
13769 } 13916 }
13770 13917
@@ -13782,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13782 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 13929 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13783 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 13930 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
13784 } 13931 }
13932 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
13785 break; 13933 break;
13786 } 13934 }
13787 13935
@@ -13887,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13887 14035
13888 rcu_assign_pointer(bp->cnic_ops, ops); 14036 rcu_assign_pointer(bp->cnic_ops, ops);
13889 14037
14038 /* Schedule driver to read CNIC driver versions */
14039 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14040
13890 return 0; 14041 return 0;
13891} 14042}
13892 14043
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3..31297266b743 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2277 data->header.rule_cnt, p->rx_accept_flags, 2277 data->header.rule_cnt, p->rx_accept_flags,
2278 p->tx_accept_flags); 2278 p->tx_accept_flags);
2279 2279
2280 /* No need for an explicit memory barrier here as long we would 2280 /* No need for an explicit memory barrier here as long as we
2281 * need to ensure the ordering of writing to the SPQ element 2281 * ensure the ordering of writing to the SPQ element
2282 * and updating of the SPQ producer which involves a memory 2282 * and updating of the SPQ producer which involves a memory
2283 * read and we will have to put a full memory barrier there 2283 * read. If the memory read is removed we will have to put a
2284 * (inside bnx2x_sp_post()). 2284 * full memory barrier there (inside bnx2x_sp_post()).
2285 */ 2285 */
2286 2286
2287 /* Send a ramrod */ 2287 /* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2982 raw->clear_pending(raw); 2982 raw->clear_pending(raw);
2983 return 0; 2983 return 0;
2984 } else { 2984 } else {
2985 /* No need for an explicit memory barrier here as long we would 2985 /* No need for an explicit memory barrier here as long as we
2986 * need to ensure the ordering of writing to the SPQ element 2986 * ensure the ordering of writing to the SPQ element
2987 * and updating of the SPQ producer which involves a memory 2987 * and updating of the SPQ producer which involves a memory
2988 * read and we will have to put a full memory barrier there 2988 * read. If the memory read is removed we will have to put a
2989 * (inside bnx2x_sp_post()). 2989 * full memory barrier there (inside bnx2x_sp_post()).
2990 */ 2990 */
2991 2991
2992 /* Send a ramrod */ 2992 /* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3466 raw->clear_pending(raw); 3466 raw->clear_pending(raw);
3467 return 0; 3467 return 0;
3468 } else { 3468 } else {
3469 /* No need for an explicit memory barrier here as long we would 3469 /* No need for an explicit memory barrier here as long as we
3470 * need to ensure the ordering of writing to the SPQ element 3470 * ensure the ordering of writing to the SPQ element
3471 * and updating of the SPQ producer which involves a memory 3471 * and updating of the SPQ producer which involves a memory
3472 * read and we will have to put a full memory barrier there 3472 * read. If the memory read is removed we will have to put a
3473 * (inside bnx2x_sp_post()). 3473 * full memory barrier there (inside bnx2x_sp_post()).
3474 */ 3474 */
3475 3475
3476 /* Send a ramrod */ 3476 /* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4092 } 4092 }
4093 4093
4094 /* No need for an explicit memory barrier here as long we would 4094 /* No need for an explicit memory barrier here as long as we
4095 * need to ensure the ordering of writing to the SPQ element 4095 * ensure the ordering of writing to the SPQ element
4096 * and updating of the SPQ producer which involves a memory 4096 * and updating of the SPQ producer which involves a memory
4097 * read and we will have to put a full memory barrier there 4097 * read. If the memory read is removed we will have to put a
4098 * (inside bnx2x_sp_post()). 4098 * full memory barrier there (inside bnx2x_sp_post()).
4099 */ 4099 */
4100 4100
4101 /* Send a ramrod */ 4101 /* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4158 rss_obj->config_rss = bnx2x_setup_rss; 4158 rss_obj->config_rss = bnx2x_setup_rss;
4159} 4159}
4160 4160
4161int validate_vlan_mac(struct bnx2x *bp,
4162 struct bnx2x_vlan_mac_obj *vlan_mac)
4163{
4164 if (!vlan_mac->get_n_elements) {
4165 BNX2X_ERR("vlan mac object was not intialized\n");
4166 return -EINVAL;
4167 }
4168 return 0;
4169}
4170
4171/********************** Queue state object ***********************************/ 4161/********************** Queue state object ***********************************/
4172 4162
4173/** 4163/**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4587 /* Fill the ramrod data */ 4577 /* Fill the ramrod data */
4588 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4578 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4589 4579
4590 /* No need for an explicit memory barrier here as long we would 4580 /* No need for an explicit memory barrier here as long as we
4591 * need to ensure the ordering of writing to the SPQ element 4581 * ensure the ordering of writing to the SPQ element
4592 * and updating of the SPQ producer which involves a memory 4582 * and updating of the SPQ producer which involves a memory
4593 * read and we will have to put a full memory barrier there 4583 * read. If the memory read is removed we will have to put a
4594 * (inside bnx2x_sp_post()). 4584 * full memory barrier there (inside bnx2x_sp_post()).
4595 */ 4585 */
4596
4597 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4586 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4598 U64_HI(data_mapping), 4587 U64_HI(data_mapping),
4599 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4588 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4615 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4604 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4616 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4605 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4617 4606
4618 /* No need for an explicit memory barrier here as long we would 4607 /* No need for an explicit memory barrier here as long as we
4619 * need to ensure the ordering of writing to the SPQ element 4608 * ensure the ordering of writing to the SPQ element
4620 * and updating of the SPQ producer which involves a memory 4609 * and updating of the SPQ producer which involves a memory
4621 * read and we will have to put a full memory barrier there 4610 * read. If the memory read is removed we will have to put a
4622 * (inside bnx2x_sp_post()). 4611 * full memory barrier there (inside bnx2x_sp_post()).
4623 */ 4612 */
4624
4625 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4613 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4626 U64_HI(data_mapping), 4614 U64_HI(data_mapping),
4627 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4615 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4659 o->cids[cid_index], rdata->general.client_id, 4647 o->cids[cid_index], rdata->general.client_id,
4660 rdata->general.sp_client_id, rdata->general.cos); 4648 rdata->general.sp_client_id, rdata->general.cos);
4661 4649
4662 /* No need for an explicit memory barrier here as long we would 4650 /* No need for an explicit memory barrier here as long as we
4663 * need to ensure the ordering of writing to the SPQ element 4651 * ensure the ordering of writing to the SPQ element
4664 * and updating of the SPQ producer which involves a memory 4652 * and updating of the SPQ producer which involves a memory
4665 * read and we will have to put a full memory barrier there 4653 * read. If the memory read is removed we will have to put a
4666 * (inside bnx2x_sp_post()). 4654 * full memory barrier there (inside bnx2x_sp_post()).
4667 */ 4655 */
4668
4669 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], 4656 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4670 U64_HI(data_mapping), 4657 U64_HI(data_mapping),
4671 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4658 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
4760 /* Fill the ramrod data */ 4747 /* Fill the ramrod data */
4761 bnx2x_q_fill_update_data(bp, o, update_params, rdata); 4748 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4762 4749
4763 /* No need for an explicit memory barrier here as long we would 4750 /* No need for an explicit memory barrier here as long as we
4764 * need to ensure the ordering of writing to the SPQ element 4751 * ensure the ordering of writing to the SPQ element
4765 * and updating of the SPQ producer which involves a memory 4752 * and updating of the SPQ producer which involves a memory
4766 * read and we will have to put a full memory barrier there 4753 * read. If the memory read is removed we will have to put a
4767 * (inside bnx2x_sp_post()). 4754 * full memory barrier there (inside bnx2x_sp_post()).
4768 */ 4755 */
4769
4770 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4756 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4771 o->cids[cid_index], U64_HI(data_mapping), 4757 o->cids[cid_index], U64_HI(data_mapping),
4772 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4758 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4813 return bnx2x_q_send_update(bp, params); 4799 return bnx2x_q_send_update(bp, params);
4814} 4800}
4815 4801
4802static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
4803 struct bnx2x_queue_sp_obj *obj,
4804 struct bnx2x_queue_update_tpa_params *params,
4805 struct tpa_update_ramrod_data *data)
4806{
4807 data->client_id = obj->cl_id;
4808 data->complete_on_both_clients = params->complete_on_both_clients;
4809 data->dont_verify_rings_pause_thr_flg =
4810 params->dont_verify_thr;
4811 data->max_agg_size = cpu_to_le16(params->max_agg_sz);
4812 data->max_sges_for_packet = params->max_sges_pkt;
4813 data->max_tpa_queues = params->max_tpa_queues;
4814 data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
4815 data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
4816 data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
4817 data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
4818 data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
4819 data->tpa_mode = params->tpa_mode;
4820 data->update_ipv4 = params->update_ipv4;
4821 data->update_ipv6 = params->update_ipv6;
4822}
4823
4816static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, 4824static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4817 struct bnx2x_queue_state_params *params) 4825 struct bnx2x_queue_state_params *params)
4818{ 4826{
4819 /* TODO: Not implemented yet. */ 4827 struct bnx2x_queue_sp_obj *o = params->q_obj;
4820 return -1; 4828 struct tpa_update_ramrod_data *rdata =
4829 (struct tpa_update_ramrod_data *)o->rdata;
4830 dma_addr_t data_mapping = o->rdata_mapping;
4831 struct bnx2x_queue_update_tpa_params *update_tpa_params =
4832 &params->params.update_tpa;
4833 u16 type;
4834
4835 /* Clear the ramrod data */
4836 memset(rdata, 0, sizeof(*rdata));
4837
4838 /* Fill the ramrod data */
4839 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
4840
4841 /* Add the function id inside the type, so that sp post function
4842 * doesn't automatically add the PF func-id, this is required
4843 * for operations done by PFs on behalf of their VFs
4844 */
4845 type = ETH_CONNECTION_TYPE |
4846 ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
4847
4848 /* No need for an explicit memory barrier here as long as we
4849 * ensure the ordering of writing to the SPQ element
4850 * and updating of the SPQ producer which involves a memory
4851 * read. If the memory read is removed we will have to put a
4852 * full memory barrier there (inside bnx2x_sp_post()).
4853 */
4854 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
4855 o->cids[BNX2X_PRIMARY_CID_INDEX],
4856 U64_HI(data_mapping),
4857 U64_LO(data_mapping), type);
4821} 4858}
4822 4859
4823static inline int bnx2x_q_send_halt(struct bnx2x *bp, 4860static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5647 rdata->tx_switch_suspend = switch_update_params->suspend; 5684 rdata->tx_switch_suspend = switch_update_params->suspend;
5648 rdata->echo = SWITCH_UPDATE; 5685 rdata->echo = SWITCH_UPDATE;
5649 5686
5687 /* No need for an explicit memory barrier here as long as we
5688 * ensure the ordering of writing to the SPQ element
5689 * and updating of the SPQ producer which involves a memory
5690 * read. If the memory read is removed we will have to put a
5691 * full memory barrier there (inside bnx2x_sp_post()).
5692 */
5650 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, 5693 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5651 U64_HI(data_mapping), 5694 U64_HI(data_mapping),
5652 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5695 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5674 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5717 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5675 rdata->echo = AFEX_UPDATE; 5718 rdata->echo = AFEX_UPDATE;
5676 5719
5677 /* No need for an explicit memory barrier here as long we would 5720 /* No need for an explicit memory barrier here as long as we
5678 * need to ensure the ordering of writing to the SPQ element 5721 * ensure the ordering of writing to the SPQ element
5679 * and updating of the SPQ producer which involves a memory 5722 * and updating of the SPQ producer which involves a memory
5680 * read and we will have to put a full memory barrier there 5723 * read. If the memory read is removed we will have to put a
5681 * (inside bnx2x_sp_post()). 5724 * full memory barrier there (inside bnx2x_sp_post()).
5682 */ 5725 */
5683 DP(BNX2X_MSG_SP, 5726 DP(BNX2X_MSG_SP,
5684 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", 5727 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5763 rdata->traffic_type_to_priority_cos[i] = 5806 rdata->traffic_type_to_priority_cos[i] =
5764 tx_start_params->traffic_type_to_priority_cos[i]; 5807 tx_start_params->traffic_type_to_priority_cos[i];
5765 5808
5809 /* No need for an explicit memory barrier here as long as we
5810 * ensure the ordering of writing to the SPQ element
5811 * and updating of the SPQ producer which involves a memory
5812 * read. If the memory read is removed we will have to put a
5813 * full memory barrier there (inside bnx2x_sp_post()).
5814 */
5766 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 5815 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5767 U64_HI(data_mapping), 5816 U64_HI(data_mapping),
5768 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5817 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a..80f6c790ed88 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
893 u8 cid_index; 893 u8 cid_index;
894}; 894};
895 895
896struct bnx2x_queue_update_tpa_params {
897 dma_addr_t sge_map;
898 u8 update_ipv4;
899 u8 update_ipv6;
900 u8 max_tpa_queues;
901 u8 max_sges_pkt;
902 u8 complete_on_both_clients;
903 u8 dont_verify_thr;
904 u8 tpa_mode;
905 u8 _pad;
906
907 u16 sge_buff_sz;
908 u16 max_agg_sz;
909
910 u16 sge_pause_thr_low;
911 u16 sge_pause_thr_high;
912};
913
896struct rxq_pause_params { 914struct rxq_pause_params {
897 u16 bd_th_lo; 915 u16 bd_th_lo;
898 u16 bd_th_hi; 916 u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
987 /* Params according to the current command */ 1005 /* Params according to the current command */
988 union { 1006 union {
989 struct bnx2x_queue_update_params update; 1007 struct bnx2x_queue_update_params update;
1008 struct bnx2x_queue_update_tpa_params update_tpa;
990 struct bnx2x_queue_setup_params setup; 1009 struct bnx2x_queue_setup_params setup;
991 struct bnx2x_queue_init_params init; 1010 struct bnx2x_queue_init_params init;
992 struct bnx2x_queue_setup_tx_only_params tx_only; 1011 struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
1403void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, 1422void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
1404 u8 *ind_table); 1423 u8 *ind_table);
1405 1424
1406int validate_vlan_mac(struct bnx2x *bp,
1407 struct bnx2x_vlan_mac_obj *vlan_mac);
1408#endif /* BNX2X_SP_VERBS */ 1425#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e42f48df6e94..5c523b32db70 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -102,82 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
102 mmiowb(); 102 mmiowb();
103 barrier(); 103 barrier();
104} 104}
105/* VFOP - VF slow-path operation support */
106 105
107#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 106static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
107 struct bnx2x_virtf *vf,
108 bool print_err)
109{
110 if (!bnx2x_leading_vfq(vf, sp_initialized)) {
111 if (print_err)
112 BNX2X_ERR("Slowpath objects not yet initialized!\n");
113 else
114 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
115 return false;
116 }
117 return true;
118}
108 119
109/* VFOP operations states */ 120/* VFOP operations states */
110enum bnx2x_vfop_qctor_state {
111 BNX2X_VFOP_QCTOR_INIT,
112 BNX2X_VFOP_QCTOR_SETUP,
113 BNX2X_VFOP_QCTOR_INT_EN
114};
115
116enum bnx2x_vfop_qdtor_state {
117 BNX2X_VFOP_QDTOR_HALT,
118 BNX2X_VFOP_QDTOR_TERMINATE,
119 BNX2X_VFOP_QDTOR_CFCDEL,
120 BNX2X_VFOP_QDTOR_DONE
121};
122
123enum bnx2x_vfop_vlan_mac_state {
124 BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
125 BNX2X_VFOP_VLAN_MAC_CLEAR,
126 BNX2X_VFOP_VLAN_MAC_CHK_DONE,
127 BNX2X_VFOP_MAC_CONFIG_LIST,
128 BNX2X_VFOP_VLAN_CONFIG_LIST,
129 BNX2X_VFOP_VLAN_CONFIG_LIST_0
130};
131
132enum bnx2x_vfop_qsetup_state {
133 BNX2X_VFOP_QSETUP_CTOR,
134 BNX2X_VFOP_QSETUP_VLAN0,
135 BNX2X_VFOP_QSETUP_DONE
136};
137
138enum bnx2x_vfop_mcast_state {
139 BNX2X_VFOP_MCAST_DEL,
140 BNX2X_VFOP_MCAST_ADD,
141 BNX2X_VFOP_MCAST_CHK_DONE
142};
143enum bnx2x_vfop_qflr_state {
144 BNX2X_VFOP_QFLR_CLR_VLAN,
145 BNX2X_VFOP_QFLR_CLR_MAC,
146 BNX2X_VFOP_QFLR_TERMINATE,
147 BNX2X_VFOP_QFLR_DONE
148};
149
150enum bnx2x_vfop_flr_state {
151 BNX2X_VFOP_FLR_QUEUES,
152 BNX2X_VFOP_FLR_HW
153};
154
155enum bnx2x_vfop_close_state {
156 BNX2X_VFOP_CLOSE_QUEUES,
157 BNX2X_VFOP_CLOSE_HW
158};
159
160enum bnx2x_vfop_rxmode_state {
161 BNX2X_VFOP_RXMODE_CONFIG,
162 BNX2X_VFOP_RXMODE_DONE
163};
164
165enum bnx2x_vfop_qteardown_state {
166 BNX2X_VFOP_QTEARDOWN_RXMODE,
167 BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
168 BNX2X_VFOP_QTEARDOWN_CLR_MAC,
169 BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
170 BNX2X_VFOP_QTEARDOWN_QDTOR,
171 BNX2X_VFOP_QTEARDOWN_DONE
172};
173
174enum bnx2x_vfop_rss_state {
175 BNX2X_VFOP_RSS_CONFIG,
176 BNX2X_VFOP_RSS_DONE
177};
178
179#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
180
181void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 121void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
182 struct bnx2x_queue_init_params *init_params, 122 struct bnx2x_queue_init_params *init_params,
183 struct bnx2x_queue_setup_params *setup_params, 123 struct bnx2x_queue_setup_params *setup_params,
@@ -221,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
221void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 161void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
222 struct bnx2x_virtf *vf, 162 struct bnx2x_virtf *vf,
223 struct bnx2x_vf_queue *q, 163 struct bnx2x_vf_queue *q,
224 struct bnx2x_vfop_qctor_params *p, 164 struct bnx2x_vf_queue_construct_params *p,
225 unsigned long q_type) 165 unsigned long q_type)
226{ 166{
227 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 167 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -290,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
290 } 230 }
291} 231}
292 232
293/* VFOP queue construction */ 233static int bnx2x_vf_queue_create(struct bnx2x *bp,
294static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) 234 struct bnx2x_virtf *vf, int qid,
235 struct bnx2x_vf_queue_construct_params *qctor)
295{ 236{
296 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 237 struct bnx2x_queue_state_params *q_params;
297 struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; 238 int rc = 0;
298 struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
299 enum bnx2x_vfop_qctor_state state = vfop->state;
300
301 bnx2x_vfop_reset_wq(vf);
302
303 if (vfop->rc < 0)
304 goto op_err;
305
306 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
307
308 switch (state) {
309 case BNX2X_VFOP_QCTOR_INIT:
310
311 /* has this queue already been opened? */
312 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
313 BNX2X_Q_LOGICAL_STATE_ACTIVE) {
314 DP(BNX2X_MSG_IOV,
315 "Entered qctor but queue was already up. Aborting gracefully\n");
316 goto op_done;
317 }
318
319 /* next state */
320 vfop->state = BNX2X_VFOP_QCTOR_SETUP;
321
322 q_params->cmd = BNX2X_Q_CMD_INIT;
323 vfop->rc = bnx2x_queue_state_change(bp, q_params);
324
325 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
326
327 case BNX2X_VFOP_QCTOR_SETUP:
328 /* next state */
329 vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
330
331 /* copy pre-prepared setup params to the queue-state params */
332 vfop->op_p->qctor.qstate.params.setup =
333 vfop->op_p->qctor.prep_qsetup;
334
335 q_params->cmd = BNX2X_Q_CMD_SETUP;
336 vfop->rc = bnx2x_queue_state_change(bp, q_params);
337 239
338 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 240 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
339 241
340 case BNX2X_VFOP_QCTOR_INT_EN: 242 /* Prepare ramrod information */
243 q_params = &qctor->qstate;
244 q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
245 set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
341 246
342 /* enable interrupts */ 247 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
343 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), 248 BNX2X_Q_LOGICAL_STATE_ACTIVE) {
344 USTORM_ID, 0, IGU_INT_ENABLE, 0); 249 DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
345 goto op_done; 250 goto out;
346 default:
347 bnx2x_vfop_default(state);
348 } 251 }
349op_err:
350 BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
351 vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
352op_done:
353 bnx2x_vfop_end(bp, vf, vfop);
354op_pending:
355 return;
356}
357 252
358static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, 253 /* Run Queue 'construction' ramrods */
359 struct bnx2x_virtf *vf, 254 q_params->cmd = BNX2X_Q_CMD_INIT;
360 struct bnx2x_vfop_cmd *cmd, 255 rc = bnx2x_queue_state_change(bp, q_params);
361 int qid) 256 if (rc)
362{ 257 goto out;
363 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
364
365 if (vfop) {
366 vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
367 258
368 vfop->args.qctor.qid = qid; 259 memcpy(&q_params->params.setup, &qctor->prep_qsetup,
369 vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); 260 sizeof(struct bnx2x_queue_setup_params));
261 q_params->cmd = BNX2X_Q_CMD_SETUP;
262 rc = bnx2x_queue_state_change(bp, q_params);
263 if (rc)
264 goto out;
370 265
371 bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, 266 /* enable interrupts */
372 bnx2x_vfop_qctor, cmd->done); 267 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
373 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, 268 USTORM_ID, 0, IGU_INT_ENABLE, 0);
374 cmd->block); 269out:
375 } 270 return rc;
376 return -ENOMEM;
377} 271}
378 272
379/* VFOP queue destruction */ 273static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
380static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) 274 int qid)
381{ 275{
382 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 276 enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
383 struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; 277 BNX2X_Q_CMD_TERMINATE,
384 struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; 278 BNX2X_Q_CMD_CFC_DEL};
385 enum bnx2x_vfop_qdtor_state state = vfop->state; 279 struct bnx2x_queue_state_params q_params;
386 280 int rc, i;
387 bnx2x_vfop_reset_wq(vf);
388
389 if (vfop->rc < 0)
390 goto op_err;
391
392 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
393
394 switch (state) {
395 case BNX2X_VFOP_QDTOR_HALT:
396
397 /* has this queue already been stopped? */
398 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
399 BNX2X_Q_LOGICAL_STATE_STOPPED) {
400 DP(BNX2X_MSG_IOV,
401 "Entered qdtor but queue was already stopped. Aborting gracefully\n");
402
403 /* next state */
404 vfop->state = BNX2X_VFOP_QDTOR_DONE;
405
406 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
407 }
408
409 /* next state */
410 vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
411
412 q_params->cmd = BNX2X_Q_CMD_HALT;
413 vfop->rc = bnx2x_queue_state_change(bp, q_params);
414
415 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
416
417 case BNX2X_VFOP_QDTOR_TERMINATE:
418 /* next state */
419 vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
420
421 q_params->cmd = BNX2X_Q_CMD_TERMINATE;
422 vfop->rc = bnx2x_queue_state_change(bp, q_params);
423 281
424 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 282 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
425 283
426 case BNX2X_VFOP_QDTOR_CFCDEL: 284 /* Prepare ramrod information */
427 /* next state */ 285 memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
428 vfop->state = BNX2X_VFOP_QDTOR_DONE; 286 q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
287 set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
429 288
430 q_params->cmd = BNX2X_Q_CMD_CFC_DEL; 289 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
431 vfop->rc = bnx2x_queue_state_change(bp, q_params); 290 BNX2X_Q_LOGICAL_STATE_STOPPED) {
291 DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
292 goto out;
293 }
432 294
433 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 295 /* Run Queue 'destruction' ramrods */
434op_err: 296 for (i = 0; i < ARRAY_SIZE(cmds); i++) {
435 BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", 297 q_params.cmd = cmds[i];
436 vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); 298 rc = bnx2x_queue_state_change(bp, &q_params);
437op_done: 299 if (rc) {
438 case BNX2X_VFOP_QDTOR_DONE: 300 BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
439 /* invalidate the context */ 301 return rc;
440 if (qdtor->cxt) {
441 qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
442 qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
443 } 302 }
444 bnx2x_vfop_end(bp, vf, vfop);
445 return;
446 default:
447 bnx2x_vfop_default(state);
448 } 303 }
449op_pending: 304out:
450 return; 305 /* Clean Context */
451} 306 if (bnx2x_vfq(vf, qid, cxt)) {
452 307 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
453static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, 308 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
454 struct bnx2x_virtf *vf,
455 struct bnx2x_vfop_cmd *cmd,
456 int qid)
457{
458 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
459
460 if (vfop) {
461 struct bnx2x_queue_state_params *qstate =
462 &vf->op_params.qctor.qstate;
463
464 memset(qstate, 0, sizeof(*qstate));
465 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
466
467 vfop->args.qdtor.qid = qid;
468 vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
469
470 bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
471 bnx2x_vfop_qdtor, cmd->done);
472 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
473 cmd->block);
474 } else {
475 BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
476 return -ENOMEM;
477 } 309 }
310
311 return 0;
478} 312}
479 313
480static void 314static void
@@ -496,751 +330,293 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
496 BP_VFDB(bp)->vf_sbs_pool++; 330 BP_VFDB(bp)->vf_sbs_pool++;
497} 331}
498 332
499/* VFOP MAC/VLAN helpers */ 333static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
500static inline void bnx2x_vfop_credit(struct bnx2x *bp, 334 struct bnx2x_vlan_mac_obj *obj,
501 struct bnx2x_vfop *vfop, 335 atomic_t *counter)
502 struct bnx2x_vlan_mac_obj *obj)
503{ 336{
504 struct bnx2x_vfop_args_filters *args = &vfop->args.filters; 337 struct list_head *pos;
505 338 int read_lock;
506 /* update credit only if there is no error 339 int cnt = 0;
507 * and a valid credit counter
508 */
509 if (!vfop->rc && args->credit) {
510 struct list_head *pos;
511 int read_lock;
512 int cnt = 0;
513
514 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
515 if (read_lock)
516 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
517 340
518 list_for_each(pos, &obj->head) 341 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
519 cnt++; 342 if (read_lock)
343 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
520 344
521 if (!read_lock) 345 list_for_each(pos, &obj->head)
522 bnx2x_vlan_mac_h_read_unlock(bp, obj); 346 cnt++;
523 347
524 atomic_set(args->credit, cnt); 348 if (!read_lock)
525 } 349 bnx2x_vlan_mac_h_read_unlock(bp, obj);
526}
527
528static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
529 struct bnx2x_vfop_filter *pos,
530 struct bnx2x_vlan_mac_data *user_req)
531{
532 user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
533 BNX2X_VLAN_MAC_DEL;
534 350
535 switch (pos->type) { 351 atomic_set(counter, cnt);
536 case BNX2X_VFOP_FILTER_MAC:
537 memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
538 break;
539 case BNX2X_VFOP_FILTER_VLAN:
540 user_req->u.vlan.vlan = pos->vid;
541 break;
542 default:
543 BNX2X_ERR("Invalid filter type, skipping\n");
544 return 1;
545 }
546 return 0;
547} 352}
548 353
549static int bnx2x_vfop_config_list(struct bnx2x *bp, 354static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
550 struct bnx2x_vfop_filters *filters, 355 int qid, bool drv_only, bool mac)
551 struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
552{ 356{
553 struct bnx2x_vfop_filter *pos, *tmp; 357 struct bnx2x_vlan_mac_ramrod_params ramrod;
554 struct list_head rollback_list, *filters_list = &filters->head; 358 int rc;
555 struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
556 int rc = 0, cnt = 0;
557
558 INIT_LIST_HEAD(&rollback_list);
559
560 list_for_each_entry_safe(pos, tmp, filters_list, link) {
561 if (bnx2x_vfop_set_user_req(bp, pos, user_req))
562 continue;
563 359
564 rc = bnx2x_config_vlan_mac(bp, vlan_mac); 360 DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
565 if (rc >= 0) { 361 mac ? "MACs" : "VLANs");
566 cnt += pos->add ? 1 : -1;
567 list_move(&pos->link, &rollback_list);
568 rc = 0;
569 } else if (rc == -EEXIST) {
570 rc = 0;
571 } else {
572 BNX2X_ERR("Failed to add a new vlan_mac command\n");
573 break;
574 }
575 }
576 362
577 /* rollback if error or too many rules added */ 363 /* Prepare ramrod params */
578 if (rc || cnt > filters->add_cnt) { 364 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
579 BNX2X_ERR("error or too many rules added. Performing rollback\n"); 365 if (mac) {
580 list_for_each_entry_safe(pos, tmp, &rollback_list, link) { 366 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
581 pos->add = !pos->add; /* reverse op */ 367 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
582 bnx2x_vfop_set_user_req(bp, pos, user_req); 368 } else {
583 bnx2x_config_vlan_mac(bp, vlan_mac); 369 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
584 list_del(&pos->link); 370 &ramrod.user_req.vlan_mac_flags);
585 } 371 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
586 cnt = 0;
587 if (!rc)
588 rc = -EINVAL;
589 } 372 }
590 filters->add_cnt = cnt; 373 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
591 return rc;
592}
593
594/* VFOP set VLAN/MAC */
595static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
596{
597 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
598 struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
599 struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
600 struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
601
602 enum bnx2x_vfop_vlan_mac_state state = vfop->state;
603
604 if (vfop->rc < 0)
605 goto op_err;
606
607 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
608
609 bnx2x_vfop_reset_wq(vf);
610
611 switch (state) {
612 case BNX2X_VFOP_VLAN_MAC_CLEAR:
613 /* next state */
614 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
615
616 /* do delete */
617 vfop->rc = obj->delete_all(bp, obj,
618 &vlan_mac->user_req.vlan_mac_flags,
619 &vlan_mac->ramrod_flags);
620
621 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
622
623 case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
624 /* next state */
625 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
626
627 /* do config */
628 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
629 if (vfop->rc == -EEXIST)
630 vfop->rc = 0;
631 374
632 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 375 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
633 376 if (drv_only)
634 case BNX2X_VFOP_VLAN_MAC_CHK_DONE: 377 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
635 vfop->rc = !!obj->raw.check_pending(&obj->raw); 378 else
636 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 379 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
637
638 case BNX2X_VFOP_MAC_CONFIG_LIST:
639 /* next state */
640 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
641
642 /* do list config */
643 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
644 if (vfop->rc)
645 goto op_err;
646
647 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
648 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
649 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
650
651 case BNX2X_VFOP_VLAN_CONFIG_LIST:
652 /* next state */
653 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
654
655 /* do list config */
656 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
657 if (!vfop->rc) {
658 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
659 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
660 }
661 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
662 380
663 default: 381 /* Start deleting */
664 bnx2x_vfop_default(state); 382 rc = ramrod.vlan_mac_obj->delete_all(bp,
383 ramrod.vlan_mac_obj,
384 &ramrod.user_req.vlan_mac_flags,
385 &ramrod.ramrod_flags);
386 if (rc) {
387 BNX2X_ERR("Failed to delete all %s\n",
388 mac ? "MACs" : "VLANs");
389 return rc;
665 } 390 }
666op_err:
667 BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
668op_done:
669 kfree(filters);
670 bnx2x_vfop_credit(bp, vfop, obj);
671 bnx2x_vfop_end(bp, vf, vfop);
672op_pending:
673 return;
674}
675
676struct bnx2x_vfop_vlan_mac_flags {
677 bool drv_only;
678 bool dont_consume;
679 bool single_cmd;
680 bool add;
681};
682
683static void
684bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
685 struct bnx2x_vfop_vlan_mac_flags *flags)
686{
687 struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
688
689 memset(ramrod, 0, sizeof(*ramrod));
690 391
691 /* ramrod flags */ 392 /* Clear the vlan counters */
692 if (flags->drv_only) 393 if (!mac)
693 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); 394 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
694 if (flags->single_cmd)
695 set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
696 395
697 /* mac_vlan flags */ 396 return 0;
698 if (flags->dont_consume)
699 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
700
701 /* cmd */
702 ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
703}
704
705static inline void
706bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
707 struct bnx2x_vfop_vlan_mac_flags *flags)
708{
709 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
710 set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
711} 397}
712 398
713static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, 399static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
714 struct bnx2x_virtf *vf, 400 struct bnx2x_virtf *vf, int qid,
715 struct bnx2x_vfop_cmd *cmd, 401 struct bnx2x_vf_mac_vlan_filter *filter,
716 int qid, bool drv_only) 402 bool drv_only)
717{ 403{
718 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 404 struct bnx2x_vlan_mac_ramrod_params ramrod;
719 int rc; 405 int rc;
720 406
721 if (vfop) { 407 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
722 struct bnx2x_vfop_args_filters filters = { 408 vf->abs_vfid, filter->add ? "Adding" : "Deleting",
723 .multi_filter = NULL, /* single */ 409 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
724 .credit = NULL, /* consume credit */ 410
725 }; 411 /* Prepare ramrod params */
726 struct bnx2x_vfop_vlan_mac_flags flags = { 412 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
727 .drv_only = drv_only, 413 if (filter->type == BNX2X_VF_FILTER_VLAN) {
728 .dont_consume = (filters.credit != NULL), 414 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
729 .single_cmd = true, 415 &ramrod.user_req.vlan_mac_flags);
730 .add = false /* don't care */, 416 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
731 }; 417 ramrod.user_req.u.vlan.vlan = filter->vid;
732 struct bnx2x_vlan_mac_ramrod_params *ramrod = 418 } else {
733 &vf->op_params.vlan_mac; 419 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
734 420 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
735 /* set ramrod params */ 421 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
736 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 422 }
737 423 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
738 /* set object */ 424 BNX2X_VLAN_MAC_DEL;
739 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); 425
740 if (rc) 426 /* Verify there are available vlan credits */
741 return rc; 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
742 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
743 429 vf_vlan_rules_cnt(vf))) {
744 /* set extra args */ 430 BNX2X_ERR("No credits for vlan\n");
745 vfop->args.filters = filters; 431 return -ENOMEM;
746
747 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
748 bnx2x_vfop_vlan_mac, cmd->done);
749 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
750 cmd->block);
751 } 432 }
752 return -ENOMEM;
753}
754
755int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
756 struct bnx2x_virtf *vf,
757 struct bnx2x_vfop_cmd *cmd,
758 struct bnx2x_vfop_filters *macs,
759 int qid, bool drv_only)
760{
761 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
762 int rc;
763 433
764 if (vfop) { 434 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
765 struct bnx2x_vfop_args_filters filters = { 435 if (drv_only)
766 .multi_filter = macs, 436 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
767 .credit = NULL, /* consume credit */ 437 else
768 }; 438 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
769 struct bnx2x_vfop_vlan_mac_flags flags = { 439
770 .drv_only = drv_only, 440 /* Add/Remove the filter */
771 .dont_consume = (filters.credit != NULL), 441 rc = bnx2x_config_vlan_mac(bp, &ramrod);
772 .single_cmd = false, 442 if (rc && rc != -EEXIST) {
773 .add = false, /* don't care since only the items in the 443 BNX2X_ERR("Failed to %s %s\n",
774 * filters list affect the sp operation, 444 filter->add ? "add" : "delete",
775 * not the list itself 445 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
776 */ 446 "VLAN");
777 }; 447 return rc;
778 struct bnx2x_vlan_mac_ramrod_params *ramrod = 448 }
779 &vf->op_params.vlan_mac;
780
781 /* set ramrod params */
782 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
783
784 /* set object */
785 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
786 if (rc)
787 return rc;
788 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
789 449
790 /* set extra args */ 450 /* Update the vlan counters */
791 filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; 451 if (filter->type == BNX2X_VF_FILTER_VLAN)
792 vfop->args.filters = filters; 452 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
453 &bnx2x_vfq(vf, qid, vlan_count));
793 454
794 bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, 455 return 0;
795 bnx2x_vfop_vlan_mac, cmd->done);
796 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
797 cmd->block);
798 }
799 return -ENOMEM;
800} 456}
801 457
802static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, 458int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
803 struct bnx2x_virtf *vf, 459 struct bnx2x_vf_mac_vlan_filters *filters,
804 struct bnx2x_vfop_cmd *cmd, 460 int qid, bool drv_only)
805 int qid, u16 vid, bool add)
806{ 461{
807 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 462 int rc = 0, i;
808 int rc;
809 463
810 if (vfop) { 464 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
811 struct bnx2x_vfop_args_filters filters = {
812 .multi_filter = NULL, /* single command */
813 .credit = &bnx2x_vfq(vf, qid, vlan_count),
814 };
815 struct bnx2x_vfop_vlan_mac_flags flags = {
816 .drv_only = false,
817 .dont_consume = (filters.credit != NULL),
818 .single_cmd = true,
819 .add = add,
820 };
821 struct bnx2x_vlan_mac_ramrod_params *ramrod =
822 &vf->op_params.vlan_mac;
823
824 /* set ramrod params */
825 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
826 ramrod->user_req.u.vlan.vlan = vid;
827
828 /* set object */
829 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
830 if (rc)
831 return rc;
832 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
833 465
834 /* set extra args */ 466 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
835 vfop->args.filters = filters; 467 return -EINVAL;
836 468
837 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, 469 /* Prepare ramrod params */
838 bnx2x_vfop_vlan_mac, cmd->done); 470 for (i = 0; i < filters->count; i++) {
839 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 471 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
840 cmd->block); 472 &filters->filters[i], drv_only);
473 if (rc)
474 break;
841 } 475 }
842 return -ENOMEM;
843}
844
845static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
846 struct bnx2x_virtf *vf,
847 struct bnx2x_vfop_cmd *cmd,
848 int qid, bool drv_only)
849{
850 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
851 int rc;
852 476
853 if (vfop) { 477 /* Rollback if needed */
854 struct bnx2x_vfop_args_filters filters = { 478 if (i != filters->count) {
855 .multi_filter = NULL, /* single command */ 479 BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
856 .credit = &bnx2x_vfq(vf, qid, vlan_count), 480 i, filters->count + 1);
857 }; 481 while (--i >= 0) {
858 struct bnx2x_vfop_vlan_mac_flags flags = { 482 filters->filters[i].add = !filters->filters[i].add;
859 .drv_only = drv_only, 483 bnx2x_vf_mac_vlan_config(bp, vf, qid,
860 .dont_consume = (filters.credit != NULL), 484 &filters->filters[i],
861 .single_cmd = true, 485 drv_only);
862 .add = false, /* don't care */ 486 }
863 }; 487 }
864 struct bnx2x_vlan_mac_ramrod_params *ramrod =
865 &vf->op_params.vlan_mac;
866
867 /* set ramrod params */
868 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
869
870 /* set object */
871 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
872 if (rc)
873 return rc;
874 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
875 488
876 /* set extra args */ 489 /* It's our responsibility to free the filters */
877 vfop->args.filters = filters; 490 kfree(filters);
878 491
879 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, 492 return rc;
880 bnx2x_vfop_vlan_mac, cmd->done);
881 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
882 cmd->block);
883 }
884 return -ENOMEM;
885} 493}
886 494
887int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, 495int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
888 struct bnx2x_virtf *vf, 496 struct bnx2x_vf_queue_construct_params *qctor)
889 struct bnx2x_vfop_cmd *cmd,
890 struct bnx2x_vfop_filters *vlans,
891 int qid, bool drv_only)
892{ 497{
893 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
894 int rc; 498 int rc;
895 499
896 if (vfop) { 500 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
897 struct bnx2x_vfop_args_filters filters = {
898 .multi_filter = vlans,
899 .credit = &bnx2x_vfq(vf, qid, vlan_count),
900 };
901 struct bnx2x_vfop_vlan_mac_flags flags = {
902 .drv_only = drv_only,
903 .dont_consume = (filters.credit != NULL),
904 .single_cmd = false,
905 .add = false, /* don't care */
906 };
907 struct bnx2x_vlan_mac_ramrod_params *ramrod =
908 &vf->op_params.vlan_mac;
909
910 /* set ramrod params */
911 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
912
913 /* set object */
914 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
915 if (rc)
916 return rc;
917 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
918
919 /* set extra args */
920 filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
921 atomic_read(filters.credit);
922
923 vfop->args.filters = filters;
924 501
925 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, 502 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
926 bnx2x_vfop_vlan_mac, cmd->done); 503 if (rc)
927 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
928 cmd->block);
929 }
930 return -ENOMEM;
931}
932
933/* VFOP queue setup (queue constructor + set vlan 0) */
934static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
935{
936 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
937 int qid = vfop->args.qctor.qid;
938 enum bnx2x_vfop_qsetup_state state = vfop->state;
939 struct bnx2x_vfop_cmd cmd = {
940 .done = bnx2x_vfop_qsetup,
941 .block = false,
942 };
943
944 if (vfop->rc < 0)
945 goto op_err; 504 goto op_err;
946 505
947 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 506 /* Configure vlan0 for leading queue */
507 if (!qid) {
508 struct bnx2x_vf_mac_vlan_filter filter;
948 509
949 switch (state) { 510 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
950 case BNX2X_VFOP_QSETUP_CTOR: 511 filter.type = BNX2X_VF_FILTER_VLAN;
951 /* init the queue ctor command */ 512 filter.add = true;
952 vfop->state = BNX2X_VFOP_QSETUP_VLAN0; 513 filter.vid = 0;
953 vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); 514 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
954 if (vfop->rc) 515 if (rc)
955 goto op_err; 516 goto op_err;
956 return; 517 }
957
958 case BNX2X_VFOP_QSETUP_VLAN0:
959 /* skip if non-leading or FPGA/EMU*/
960 if (qid)
961 goto op_done;
962 518
963 /* init the queue set-vlan command (for vlan 0) */ 519 /* Schedule the configuration of any pending vlan filters */
964 vfop->state = BNX2X_VFOP_QSETUP_DONE; 520 vf->cfg_flags |= VF_CFG_VLAN;
965 vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); 521 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
966 if (vfop->rc) 522 BNX2X_MSG_IOV);
967 goto op_err; 523 return 0;
968 return;
969op_err: 524op_err:
970 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); 525 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
971op_done: 526 return rc;
972 case BNX2X_VFOP_QSETUP_DONE:
973 vf->cfg_flags |= VF_CFG_VLAN;
974 smp_mb__before_clear_bit();
975 set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
976 &bp->sp_rtnl_state);
977 smp_mb__after_clear_bit();
978 schedule_delayed_work(&bp->sp_rtnl_task, 0);
979 bnx2x_vfop_end(bp, vf, vfop);
980 return;
981 default:
982 bnx2x_vfop_default(state);
983 }
984} 527}
985 528
986int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, 529static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
987 struct bnx2x_virtf *vf, 530 int qid)
988 struct bnx2x_vfop_cmd *cmd,
989 int qid)
990{ 531{
991 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 532 int rc;
992 533
993 if (vfop) { 534 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
994 vfop->args.qctor.qid = qid;
995 535
996 bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, 536 /* If needed, clean the filtering data base */
997 bnx2x_vfop_qsetup, cmd->done); 537 if ((qid == LEADING_IDX) &&
998 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, 538 bnx2x_validate_vf_sp_objs(bp, vf, false)) {
999 cmd->block); 539 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
540 if (rc)
541 goto op_err;
542 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
543 if (rc)
544 goto op_err;
1000 } 545 }
1001 return -ENOMEM;
1002}
1003
1004/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
1005static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1006{
1007 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1008 int qid = vfop->args.qx.qid;
1009 enum bnx2x_vfop_qflr_state state = vfop->state;
1010 struct bnx2x_queue_state_params *qstate;
1011 struct bnx2x_vfop_cmd cmd;
1012
1013 bnx2x_vfop_reset_wq(vf);
1014
1015 if (vfop->rc < 0)
1016 goto op_err;
1017 546
1018 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); 547 /* Terminate queue */
548 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
549 struct bnx2x_queue_state_params qstate;
1019 550
1020 cmd.done = bnx2x_vfop_qflr; 551 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
1021 cmd.block = false; 552 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
1022 553 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
1023 switch (state) { 554 qstate.cmd = BNX2X_Q_CMD_TERMINATE;
1024 case BNX2X_VFOP_QFLR_CLR_VLAN: 555 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
1025 /* vlan-clear-all: driver-only, don't consume credit */ 556 rc = bnx2x_queue_state_change(bp, &qstate);
1026 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; 557 if (rc)
1027 558 goto op_err;
1028 if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
1029 /* the vlan_mac vfop will re-schedule us */
1030 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
1031 qid, true);
1032 if (vfop->rc)
1033 goto op_err;
1034 return;
1035
1036 } else {
1037 /* need to reschedule ourselves */
1038 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1039 }
1040
1041 case BNX2X_VFOP_QFLR_CLR_MAC:
1042 /* mac-clear-all: driver only consume credit */
1043 vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
1044 if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
1045 /* the vlan_mac vfop will re-schedule us */
1046 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
1047 qid, true);
1048 if (vfop->rc)
1049 goto op_err;
1050 return;
1051
1052 } else {
1053 /* need to reschedule ourselves */
1054 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1055 }
1056
1057 case BNX2X_VFOP_QFLR_TERMINATE:
1058 qstate = &vfop->op_p->qctor.qstate;
1059 memset(qstate , 0, sizeof(*qstate));
1060 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
1061 vfop->state = BNX2X_VFOP_QFLR_DONE;
1062
1063 DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
1064 vf->abs_vfid, qstate->q_obj->state);
1065
1066 if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
1067 qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
1068 qstate->cmd = BNX2X_Q_CMD_TERMINATE;
1069 vfop->rc = bnx2x_queue_state_change(bp, qstate);
1070 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
1071 } else {
1072 goto op_done;
1073 }
1074
1075op_err:
1076 BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
1077 vf->abs_vfid, qid, vfop->rc);
1078op_done:
1079 case BNX2X_VFOP_QFLR_DONE:
1080 bnx2x_vfop_end(bp, vf, vfop);
1081 return;
1082 default:
1083 bnx2x_vfop_default(state);
1084 } 559 }
1085op_pending:
1086 return;
1087}
1088
1089static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
1090 struct bnx2x_virtf *vf,
1091 struct bnx2x_vfop_cmd *cmd,
1092 int qid)
1093{
1094 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1095 560
1096 if (vfop) { 561 return 0;
1097 vfop->args.qx.qid = qid; 562op_err:
1098 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, 563 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
1099 bnx2x_vfop_qflr, cmd->done); 564 return rc;
1100 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
1101 cmd->block);
1102 }
1103 return -ENOMEM;
1104} 565}
1105 566
1106/* VFOP multi-casts */ 567int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
1107static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) 568 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
1108{ 569{
1109 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 570 struct bnx2x_mcast_list_elem *mc = NULL;
1110 struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; 571 struct bnx2x_mcast_ramrod_params mcast;
1111 struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; 572 int rc, i;
1112 struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
1113 enum bnx2x_vfop_mcast_state state = vfop->state;
1114 int i;
1115
1116 bnx2x_vfop_reset_wq(vf);
1117
1118 if (vfop->rc < 0)
1119 goto op_err;
1120 573
1121 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 574 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
1122
1123 switch (state) {
1124 case BNX2X_VFOP_MCAST_DEL:
1125 /* clear existing mcasts */
1126 vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
1127 : BNX2X_VFOP_MCAST_CHK_DONE;
1128 mcast->mcast_list_len = vf->mcast_list_len;
1129 vf->mcast_list_len = args->mc_num;
1130 vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
1131 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1132
1133 case BNX2X_VFOP_MCAST_ADD:
1134 if (raw->check_pending(raw))
1135 goto op_pending;
1136
1137 /* update mcast list on the ramrod params */
1138 INIT_LIST_HEAD(&mcast->mcast_list);
1139 for (i = 0; i < args->mc_num; i++)
1140 list_add_tail(&(args->mc[i].link),
1141 &mcast->mcast_list);
1142 mcast->mcast_list_len = args->mc_num;
1143 575
1144 /* add new mcasts */ 576 /* Prepare Multicast command */
1145 vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; 577 memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
1146 vfop->rc = bnx2x_config_mcast(bp, mcast, 578 mcast.mcast_obj = &vf->mcast_obj;
1147 BNX2X_MCAST_CMD_ADD); 579 if (drv_only)
1148 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 580 set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
1149 581 else
1150 case BNX2X_VFOP_MCAST_CHK_DONE: 582 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
1151 vfop->rc = raw->check_pending(raw) ? 1 : 0; 583 if (mc_num) {
1152 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 584 mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
1153 default: 585 GFP_KERNEL);
1154 bnx2x_vfop_default(state); 586 if (!mc) {
587 BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
588 return -ENOMEM;
589 }
1155 } 590 }
1156op_err:
1157 BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1158op_done:
1159 kfree(args->mc);
1160 bnx2x_vfop_end(bp, vf, vfop);
1161op_pending:
1162 return;
1163}
1164 591
1165int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, 592 /* clear existing mcasts */
1166 struct bnx2x_virtf *vf, 593 mcast.mcast_list_len = vf->mcast_list_len;
1167 struct bnx2x_vfop_cmd *cmd, 594 vf->mcast_list_len = mc_num;
1168 bnx2x_mac_addr_t *mcasts, 595 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
1169 int mcast_num, bool drv_only) 596 if (rc) {
1170{ 597 BNX2X_ERR("Failed to remove multicasts\n");
1171 struct bnx2x_vfop *vfop = NULL; 598 if (mc)
1172 size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1173 struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1174 NULL;
1175
1176 if (!mc_sz || mc) {
1177 vfop = bnx2x_vfop_add(bp, vf);
1178 if (vfop) {
1179 int i;
1180 struct bnx2x_mcast_ramrod_params *ramrod =
1181 &vf->op_params.mcast;
1182
1183 /* set ramrod params */
1184 memset(ramrod, 0, sizeof(*ramrod));
1185 ramrod->mcast_obj = &vf->mcast_obj;
1186 if (drv_only)
1187 set_bit(RAMROD_DRV_CLR_ONLY,
1188 &ramrod->ramrod_flags);
1189
1190 /* copy mcasts pointers */
1191 vfop->args.mc_list.mc_num = mcast_num;
1192 vfop->args.mc_list.mc = mc;
1193 for (i = 0; i < mcast_num; i++)
1194 mc[i].mac = mcasts[i];
1195
1196 bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1197 bnx2x_vfop_mcast, cmd->done);
1198 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1199 cmd->block);
1200 } else {
1201 kfree(mc); 599 kfree(mc);
1202 } 600 return rc;
1203 } 601 }
1204 return -ENOMEM;
1205}
1206
1207/* VFOP rx-mode */
1208static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1209{
1210 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1211 struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1212 enum bnx2x_vfop_rxmode_state state = vfop->state;
1213
1214 bnx2x_vfop_reset_wq(vf);
1215
1216 if (vfop->rc < 0)
1217 goto op_err;
1218 602
1219 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 603 /* update mcast list on the ramrod params */
1220 604 if (mc_num) {
1221 switch (state) { 605 INIT_LIST_HEAD(&mcast.mcast_list);
1222 case BNX2X_VFOP_RXMODE_CONFIG: 606 for (i = 0; i < mc_num; i++) {
1223 /* next state */ 607 mc[i].mac = mcasts[i];
1224 vfop->state = BNX2X_VFOP_RXMODE_DONE; 608 list_add_tail(&mc[i].link,
609 &mcast.mcast_list);
610 }
1225 611
1226 /* record the accept flags in vfdb so hypervisor can modify them 612 /* add new mcasts */
1227 * if necessary 613 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
1228 */ 614 if (rc)
1229 bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = 615 BNX2X_ERR("Faled to add multicasts\n");
1230 ramrod->rx_accept_flags; 616 kfree(mc);
1231 vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1232 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1233op_err:
1234 BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1235op_done:
1236 case BNX2X_VFOP_RXMODE_DONE:
1237 bnx2x_vfop_end(bp, vf, vfop);
1238 return;
1239 default:
1240 bnx2x_vfop_default(state);
1241 } 617 }
1242op_pending: 618
1243 return; 619 return rc;
1244} 620}
1245 621
1246static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, 622static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,118 +644,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1268 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); 644 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1269} 645}
1270 646
1271int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 647int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
1272 struct bnx2x_virtf *vf, 648 int qid, unsigned long accept_flags)
1273 struct bnx2x_vfop_cmd *cmd,
1274 int qid, unsigned long accept_flags)
1275{ 649{
1276 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 650 struct bnx2x_rx_mode_ramrod_params ramrod;
1277
1278 if (vfop) {
1279 struct bnx2x_rx_mode_ramrod_params *ramrod =
1280 &vf->op_params.rx_mode;
1281 651
1282 bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); 652 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
1283 653
1284 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, 654 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
1285 bnx2x_vfop_rxmode, cmd->done); 655 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
1286 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode, 656 vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
1287 cmd->block); 657 return bnx2x_config_rx_mode(bp, &ramrod);
1288 }
1289 return -ENOMEM;
1290} 658}
1291 659
1292/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs, 660int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
1293 * queue destructor)
1294 */
1295static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1296{ 661{
1297 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 662 int rc;
1298 int qid = vfop->args.qx.qid;
1299 enum bnx2x_vfop_qteardown_state state = vfop->state;
1300 struct bnx2x_vfop_cmd cmd;
1301
1302 if (vfop->rc < 0)
1303 goto op_err;
1304
1305 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1306
1307 cmd.done = bnx2x_vfop_qdown;
1308 cmd.block = false;
1309
1310 switch (state) {
1311 case BNX2X_VFOP_QTEARDOWN_RXMODE:
1312 /* Drop all */
1313 vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1314 vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1315 if (vfop->rc)
1316 goto op_err;
1317 return;
1318
1319 case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1320 /* vlan-clear-all: don't consume credit */
1321 vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1322 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1323 if (vfop->rc)
1324 goto op_err;
1325 return;
1326
1327 case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1328 /* mac-clear-all: consume credit */
1329 vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
1330 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1331 if (vfop->rc)
1332 goto op_err;
1333 return;
1334 663
1335 case BNX2X_VFOP_QTEARDOWN_CLR_MCAST: 664 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
1336 vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1337 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
1338 if (vfop->rc)
1339 goto op_err;
1340 return;
1341 665
1342 case BNX2X_VFOP_QTEARDOWN_QDTOR: 666 /* Remove all classification configuration for leading queue */
1343 /* run the queue destruction flow */ 667 if (qid == LEADING_IDX) {
1344 DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n"); 668 rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
1345 vfop->state = BNX2X_VFOP_QTEARDOWN_DONE; 669 if (rc)
1346 DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1347 vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1348 DP(BNX2X_MSG_IOV, "returned from cmd\n");
1349 if (vfop->rc)
1350 goto op_err; 670 goto op_err;
1351 return;
1352op_err:
1353 BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1354 vf->abs_vfid, qid, vfop->rc);
1355
1356 case BNX2X_VFOP_QTEARDOWN_DONE:
1357 bnx2x_vfop_end(bp, vf, vfop);
1358 return;
1359 default:
1360 bnx2x_vfop_default(state);
1361 }
1362}
1363 671
1364int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, 672 /* Remove filtering if feasible */
1365 struct bnx2x_virtf *vf, 673 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
1366 struct bnx2x_vfop_cmd *cmd, 674 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
1367 int qid) 675 false, false);
1368{ 676 if (rc)
1369 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 677 goto op_err;
1370 678 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
1371 /* for non leading queues skip directly to qdown sate */ 679 false, true);
1372 if (vfop) { 680 if (rc)
1373 vfop->args.qx.qid = qid; 681 goto op_err;
1374 bnx2x_vfop_opset(qid == LEADING_IDX ? 682 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
1375 BNX2X_VFOP_QTEARDOWN_RXMODE : 683 if (rc)
1376 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, 684 goto op_err;
1377 cmd->done); 685 }
1378 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1379 cmd->block);
1380 } 686 }
1381 687
1382 return -ENOMEM; 688 /* Destroy queue */
689 rc = bnx2x_vf_queue_destroy(bp, vf, qid);
690 if (rc)
691 goto op_err;
692 return rc;
693op_err:
694 BNX2X_ERR("vf[%d:%d] error: rc %d\n",
695 vf->abs_vfid, qid, rc);
696 return rc;
1383} 697}
1384 698
1385/* VF enable primitives 699/* VF enable primitives
@@ -1579,120 +893,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
1579 bnx2x_tx_hw_flushed(bp, poll_cnt); 893 bnx2x_tx_hw_flushed(bp, poll_cnt);
1580} 894}
1581 895
1582static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) 896static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1583{ 897{
1584 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 898 int rc, i;
1585 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
1586 enum bnx2x_vfop_flr_state state = vfop->state;
1587 struct bnx2x_vfop_cmd cmd = {
1588 .done = bnx2x_vfop_flr,
1589 .block = false,
1590 };
1591
1592 if (vfop->rc < 0)
1593 goto op_err;
1594
1595 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1596 899
1597 switch (state) { 900 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
1598 case BNX2X_VFOP_FLR_QUEUES:
1599 /* the cleanup operations are valid if and only if the VF
1600 * was first acquired.
1601 */
1602 if (++(qx->qid) < vf_rxq_count(vf)) {
1603 vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
1604 qx->qid);
1605 if (vfop->rc)
1606 goto op_err;
1607 return;
1608 }
1609 /* remove multicasts */
1610 vfop->state = BNX2X_VFOP_FLR_HW;
1611 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
1612 0, true);
1613 if (vfop->rc)
1614 goto op_err;
1615 return;
1616 case BNX2X_VFOP_FLR_HW:
1617 901
1618 /* dispatch final cleanup and wait for HW queues to flush */ 902 /* the cleanup operations are valid if and only if the VF
1619 bnx2x_vf_flr_clnup_hw(bp, vf); 903 * was first acquired.
904 */
905 for (i = 0; i < vf_rxq_count(vf); i++) {
906 rc = bnx2x_vf_queue_flr(bp, vf, i);
907 if (rc)
908 goto out;
909 }
1620 910
1621 /* release VF resources */ 911 /* remove multicasts */
1622 bnx2x_vf_free_resc(bp, vf); 912 bnx2x_vf_mcast(bp, vf, NULL, 0, true);
1623 913
1624 /* re-open the mailbox */ 914 /* dispatch final cleanup and wait for HW queues to flush */
1625 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 915 bnx2x_vf_flr_clnup_hw(bp, vf);
1626 916
1627 goto op_done; 917 /* release VF resources */
1628 default: 918 bnx2x_vf_free_resc(bp, vf);
1629 bnx2x_vfop_default(state);
1630 }
1631op_err:
1632 BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
1633op_done:
1634 vf->flr_clnup_stage = VF_FLR_ACK;
1635 bnx2x_vfop_end(bp, vf, vfop);
1636 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1637}
1638 919
1639static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, 920 /* re-open the mailbox */
1640 struct bnx2x_virtf *vf, 921 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1641 vfop_handler_t done) 922 return;
1642{ 923out:
1643 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 924 BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
1644 if (vfop) { 925 vf->abs_vfid, i, rc);
1645 vfop->args.qx.qid = -1; /* loop */
1646 bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
1647 bnx2x_vfop_flr, done);
1648 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
1649 }
1650 return -ENOMEM;
1651} 926}
1652 927
1653static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) 928static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
1654{ 929{
1655 int i = prev_vf ? prev_vf->index + 1 : 0;
1656 struct bnx2x_virtf *vf; 930 struct bnx2x_virtf *vf;
931 int i;
1657 932
1658 /* find next VF to cleanup */ 933 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
1659next_vf_to_clean: 934 /* VF should be RESET & in FLR cleanup states */
1660 for (; 935 if (bnx2x_vf(bp, i, state) != VF_RESET ||
1661 i < BNX2X_NR_VIRTFN(bp) && 936 !bnx2x_vf(bp, i, flr_clnup_stage))
1662 (bnx2x_vf(bp, i, state) != VF_RESET || 937 continue;
1663 bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
1664 i++)
1665 ;
1666 938
1667 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, 939 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
1668 BNX2X_NR_VIRTFN(bp)); 940 i, BNX2X_NR_VIRTFN(bp));
1669 941
1670 if (i < BNX2X_NR_VIRTFN(bp)) {
1671 vf = BP_VF(bp, i); 942 vf = BP_VF(bp, i);
1672 943
1673 /* lock the vf pf channel */ 944 /* lock the vf pf channel */
1674 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 945 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1675 946
1676 /* invoke the VF FLR SM */ 947 /* invoke the VF FLR SM */
1677 if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { 948 bnx2x_vf_flr(bp, vf);
1678 BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
1679 vf->abs_vfid);
1680 949
1681 /* mark the VF to be ACKED and continue */ 950 /* mark the VF to be ACKED and continue */
1682 vf->flr_clnup_stage = VF_FLR_ACK; 951 vf->flr_clnup_stage = false;
1683 goto next_vf_to_clean; 952 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1684 }
1685 return;
1686 }
1687
1688 /* we are done, update vf records */
1689 for_each_vf(bp, i) {
1690 vf = BP_VF(bp, i);
1691
1692 if (vf->flr_clnup_stage != VF_FLR_ACK)
1693 continue;
1694
1695 vf->flr_clnup_stage = VF_FLR_EPILOG;
1696 } 953 }
1697 954
1698 /* Acknowledge the handled VFs. 955 /* Acknowledge the handled VFs.
@@ -1742,7 +999,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1742 if (reset) { 999 if (reset) {
1743 /* set as reset and ready for cleanup */ 1000 /* set as reset and ready for cleanup */
1744 vf->state = VF_RESET; 1001 vf->state = VF_RESET;
1745 vf->flr_clnup_stage = VF_FLR_CLN; 1002 vf->flr_clnup_stage = true;
1746 1003
1747 DP(BNX2X_MSG_IOV, 1004 DP(BNX2X_MSG_IOV,
1748 "Initiating Final cleanup for VF %d\n", 1005 "Initiating Final cleanup for VF %d\n",
@@ -1751,7 +1008,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1751 } 1008 }
1752 1009
1753 /* do the FLR cleanup for all marked VFs*/ 1010 /* do the FLR cleanup for all marked VFs*/
1754 bnx2x_vf_flr_clnup(bp, NULL); 1011 bnx2x_vf_flr_clnup(bp);
1755} 1012}
1756 1013
1757/* IOV global initialization routines */ 1014/* IOV global initialization routines */
@@ -2018,7 +1275,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
2018 bnx2x_vf(bp, i, index) = i; 1275 bnx2x_vf(bp, i, index) = i;
2019 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1276 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
2020 bnx2x_vf(bp, i, state) = VF_FREE; 1277 bnx2x_vf(bp, i, state) = VF_FREE;
2021 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
2022 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1278 mutex_init(&bnx2x_vf(bp, i, op_mutex));
2023 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1279 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
2024 } 1280 }
@@ -2039,6 +1295,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
2039 goto failed; 1295 goto failed;
2040 } 1296 }
2041 1297
1298 /* Prepare the VFs event synchronization mechanism */
1299 mutex_init(&bp->vfdb->event_mutex);
1300
2042 return 0; 1301 return 0;
2043failed: 1302failed:
2044 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 1303 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2117,7 +1376,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2117 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 1376 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
2118 1377
2119 if (cxt->size) { 1378 if (cxt->size) {
2120 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 1379 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
1380 if (!cxt->addr)
1381 goto alloc_mem_err;
2121 } else { 1382 } else {
2122 cxt->addr = NULL; 1383 cxt->addr = NULL;
2123 cxt->mapping = 0; 1384 cxt->mapping = 0;
@@ -2127,20 +1388,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2127 1388
2128 /* allocate vfs ramrods dma memory - client_init and set_mac */ 1389 /* allocate vfs ramrods dma memory - client_init and set_mac */
2129 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 1390 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
2130 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 1391 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
2131 tot_size); 1392 tot_size);
1393 if (!BP_VFDB(bp)->sp_dma.addr)
1394 goto alloc_mem_err;
2132 BP_VFDB(bp)->sp_dma.size = tot_size; 1395 BP_VFDB(bp)->sp_dma.size = tot_size;
2133 1396
2134 /* allocate mailboxes */ 1397 /* allocate mailboxes */
2135 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 1398 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
2136 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 1399 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
2137 tot_size); 1400 tot_size);
1401 if (!BP_VF_MBX_DMA(bp)->addr)
1402 goto alloc_mem_err;
1403
2138 BP_VF_MBX_DMA(bp)->size = tot_size; 1404 BP_VF_MBX_DMA(bp)->size = tot_size;
2139 1405
2140 /* allocate local bulletin boards */ 1406 /* allocate local bulletin boards */
2141 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 1407 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
2142 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 1408 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
2143 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 1409 tot_size);
1410 if (!BP_VF_BULLETIN_DMA(bp)->addr)
1411 goto alloc_mem_err;
1412
2144 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 1413 BP_VF_BULLETIN_DMA(bp)->size = tot_size;
2145 1414
2146 return 0; 1415 return 0;
@@ -2166,6 +1435,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2166 bnx2x_vf_sp_map(bp, vf, q_data), 1435 bnx2x_vf_sp_map(bp, vf, q_data),
2167 q_type); 1436 q_type);
2168 1437
1438 /* sp indication is set only when vlan/mac/etc. are initialized */
1439 q->sp_initialized = false;
1440
2169 DP(BNX2X_MSG_IOV, 1441 DP(BNX2X_MSG_IOV,
2170 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 1442 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2171 vf->abs_vfid, q->sp_obj.func_id, q->cid); 1443 vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2269,7 +1541,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
2269 1541
2270 /* release all the VFs */ 1542 /* release all the VFs */
2271 for_each_vf(bp, i) 1543 for_each_vf(bp, i)
2272 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 1544 bnx2x_vf_release(bp, BP_VF(bp, i));
2273 1545
2274 return 0; 1546 return 0;
2275} 1547}
@@ -2359,6 +1631,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2359 smp_mb__after_clear_bit(); 1631 smp_mb__after_clear_bit();
2360} 1632}
2361 1633
1634static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1635 struct bnx2x_virtf *vf)
1636{
1637 vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1638}
1639
2362int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 1640int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2363{ 1641{
2364 struct bnx2x_virtf *vf; 1642 struct bnx2x_virtf *vf;
@@ -2383,6 +1661,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2383 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1661 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2384 case EVENT_RING_OPCODE_MULTICAST_RULES: 1662 case EVENT_RING_OPCODE_MULTICAST_RULES:
2385 case EVENT_RING_OPCODE_FILTERS_RULES: 1663 case EVENT_RING_OPCODE_FILTERS_RULES:
1664 case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
2386 cid = (elem->message.data.eth_event.echo & 1665 cid = (elem->message.data.eth_event.echo &
2387 BNX2X_SWCID_MASK); 1666 BNX2X_SWCID_MASK);
2388 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1667 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
@@ -2447,13 +1726,15 @@ get_vf:
2447 vf->abs_vfid, qidx); 1726 vf->abs_vfid, qidx);
2448 bnx2x_vf_handle_filters_eqe(bp, vf); 1727 bnx2x_vf_handle_filters_eqe(bp, vf);
2449 break; 1728 break;
1729 case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1730 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1731 vf->abs_vfid, qidx);
1732 bnx2x_vf_handle_rss_update_eqe(bp, vf);
2450 case EVENT_RING_OPCODE_VF_FLR: 1733 case EVENT_RING_OPCODE_VF_FLR:
2451 case EVENT_RING_OPCODE_MALICIOUS_VF: 1734 case EVENT_RING_OPCODE_MALICIOUS_VF:
2452 /* Do nothing for now */ 1735 /* Do nothing for now */
2453 return 0; 1736 return 0;
2454 } 1737 }
2455 /* SRIOV: reschedule any 'in_progress' operations */
2456 bnx2x_iov_sp_event(bp, cid, false);
2457 1738
2458 return 0; 1739 return 0;
2459} 1740}
@@ -2490,23 +1771,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2490 } 1771 }
2491} 1772}
2492 1773
2493void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2494{
2495 struct bnx2x_virtf *vf;
2496
2497 /* check if the cid is the VF range */
2498 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2499 return;
2500
2501 vf = bnx2x_vf_by_cid(bp, vf_cid);
2502 if (vf) {
2503 /* set in_progress flag */
2504 atomic_set(&vf->op_in_progress, 1);
2505 if (queue_work)
2506 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2507 }
2508}
2509
2510void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1774void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2511{ 1775{
2512 int i; 1776 int i;
@@ -2527,10 +1791,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2527 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1791 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2528 (is_fcoe ? 0 : 1); 1792 (is_fcoe ? 0 : 1);
2529 1793
2530 DP(BNX2X_MSG_IOV, 1794 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2531 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 1795 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2532 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1796 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2533 first_queue_query_index + num_queues_req); 1797 first_queue_query_index + num_queues_req);
2534 1798
2535 cur_data_offset = bp->fw_stats_data_mapping + 1799 cur_data_offset = bp->fw_stats_data_mapping +
2536 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1800 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +1808,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2544 struct bnx2x_virtf *vf = BP_VF(bp, i); 1808 struct bnx2x_virtf *vf = BP_VF(bp, i);
2545 1809
2546 if (vf->state != VF_ENABLED) { 1810 if (vf->state != VF_ENABLED) {
2547 DP(BNX2X_MSG_IOV, 1811 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2548 "vf %d not enabled so no stats for it\n", 1812 "vf %d not enabled so no stats for it\n",
2549 vf->abs_vfid); 1813 vf->abs_vfid);
2550 continue; 1814 continue;
2551 } 1815 }
2552 1816
@@ -2588,32 +1852,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2588 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1852 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2589} 1853}
2590 1854
2591void bnx2x_iov_sp_task(struct bnx2x *bp)
2592{
2593 int i;
2594
2595 if (!IS_SRIOV(bp))
2596 return;
2597 /* Iterate over all VFs and invoke state transition for VFs with
2598 * 'in-progress' slow-path operations
2599 */
2600 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2601 for_each_vf(bp, i) {
2602 struct bnx2x_virtf *vf = BP_VF(bp, i);
2603
2604 if (!vf) {
2605 BNX2X_ERR("VF was null! skipping...\n");
2606 continue;
2607 }
2608
2609 if (!list_empty(&vf->op_list_head) &&
2610 atomic_read(&vf->op_in_progress)) {
2611 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2612 bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2613 }
2614 }
2615}
2616
2617static inline 1855static inline
2618struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 1856struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2619{ 1857{
@@ -2849,52 +2087,26 @@ static void bnx2x_set_vf_state(void *cookie)
2849 p->vf->state = p->state; 2087 p->vf->state = p->state;
2850} 2088}
2851 2089
2852/* VFOP close (teardown the queues, delete mcasts and close HW) */ 2090int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2853static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2854{ 2091{
2855 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2092 int rc = 0, i;
2856 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2857 enum bnx2x_vfop_close_state state = vfop->state;
2858 struct bnx2x_vfop_cmd cmd = {
2859 .done = bnx2x_vfop_close,
2860 .block = false,
2861 };
2862 2093
2863 if (vfop->rc < 0) 2094 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2864 goto op_err;
2865
2866 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2867
2868 switch (state) {
2869 case BNX2X_VFOP_CLOSE_QUEUES:
2870
2871 if (++(qx->qid) < vf_rxq_count(vf)) {
2872 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2873 if (vfop->rc)
2874 goto op_err;
2875 return;
2876 }
2877 vfop->state = BNX2X_VFOP_CLOSE_HW;
2878 vfop->rc = 0;
2879 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
2880 2095
2881 case BNX2X_VFOP_CLOSE_HW: 2096 /* Close all queues */
2882 2097 for (i = 0; i < vf_rxq_count(vf); i++) {
2883 /* disable the interrupts */ 2098 rc = bnx2x_vf_queue_teardown(bp, vf, i);
2884 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2099 if (rc)
2885 bnx2x_vf_igu_disable(bp, vf); 2100 goto op_err;
2101 }
2886 2102
2887 /* disable the VF */ 2103 /* disable the interrupts */
2888 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2104 DP(BNX2X_MSG_IOV, "disabling igu\n");
2889 bnx2x_vf_clr_qtbl(bp, vf); 2105 bnx2x_vf_igu_disable(bp, vf);
2890 2106
2891 goto op_done; 2107 /* disable the VF */
2892 default: 2108 DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2893 bnx2x_vfop_default(state); 2109 bnx2x_vf_clr_qtbl(bp, vf);
2894 }
2895op_err:
2896 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2897op_done:
2898 2110
2899 /* need to make sure there are no outstanding stats ramrods which may 2111 /* need to make sure there are no outstanding stats ramrods which may
2900 * cause the device to access the VF's stats buffer which it will free 2112 * cause the device to access the VF's stats buffer which it will free
@@ -2909,43 +2121,20 @@ op_done:
2909 } 2121 }
2910 2122
2911 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2123 DP(BNX2X_MSG_IOV, "set state to acquired\n");
2912 bnx2x_vfop_end(bp, vf, vfop);
2913op_pending:
2914 /* Not supported at the moment; Exists for macros only */
2915 return;
2916}
2917 2124
2918int bnx2x_vfop_close_cmd(struct bnx2x *bp, 2125 return 0;
2919 struct bnx2x_virtf *vf, 2126op_err:
2920 struct bnx2x_vfop_cmd *cmd) 2127 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2921{ 2128 return rc;
2922 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2923 if (vfop) {
2924 vfop->args.qx.qid = -1; /* loop */
2925 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2926 bnx2x_vfop_close, cmd->done);
2927 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2928 cmd->block);
2929 }
2930 return -ENOMEM;
2931} 2129}
2932 2130
2933/* VF release can be called either: 1. The VF was acquired but 2131/* VF release can be called either: 1. The VF was acquired but
2934 * not enabled 2. the vf was enabled or in the process of being 2132 * not enabled 2. the vf was enabled or in the process of being
2935 * enabled 2133 * enabled
2936 */ 2134 */
2937static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2135int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2938{ 2136{
2939 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2137 int rc;
2940 struct bnx2x_vfop_cmd cmd = {
2941 .done = bnx2x_vfop_release,
2942 .block = false,
2943 };
2944
2945 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2946
2947 if (vfop->rc < 0)
2948 goto op_err;
2949 2138
2950 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2139 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2951 vf->state == VF_FREE ? "Free" : 2140 vf->state == VF_FREE ? "Free" :
@@ -2956,116 +2145,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2956 2145
2957 switch (vf->state) { 2146 switch (vf->state) {
2958 case VF_ENABLED: 2147 case VF_ENABLED:
2959 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2148 rc = bnx2x_vf_close(bp, vf);
2960 if (vfop->rc) 2149 if (rc)
2961 goto op_err; 2150 goto op_err;
2962 return; 2151 /* Fallthrough to release resources */
2963
2964 case VF_ACQUIRED: 2152 case VF_ACQUIRED:
2965 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2153 DP(BNX2X_MSG_IOV, "about to free resources\n");
2966 bnx2x_vf_free_resc(bp, vf); 2154 bnx2x_vf_free_resc(bp, vf);
2967 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2155 break;
2968 goto op_done;
2969 2156
2970 case VF_FREE: 2157 case VF_FREE:
2971 case VF_RESET: 2158 case VF_RESET:
2972 /* do nothing */
2973 goto op_done;
2974 default: 2159 default:
2975 bnx2x_vfop_default(vf->state); 2160 break;
2976 } 2161 }
2162 return 0;
2977op_err: 2163op_err:
2978 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2164 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2979op_done: 2165 return rc;
2980 bnx2x_vfop_end(bp, vf, vfop);
2981} 2166}
2982 2167
2983static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) 2168int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2169 struct bnx2x_config_rss_params *rss)
2984{ 2170{
2985 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2171 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2986 enum bnx2x_vfop_rss_state state; 2172 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2987 2173 return bnx2x_config_rss(bp, rss);
2988 if (!vfop) {
2989 BNX2X_ERR("vfop was null\n");
2990 return;
2991 }
2992
2993 state = vfop->state;
2994 bnx2x_vfop_reset_wq(vf);
2995
2996 if (vfop->rc < 0)
2997 goto op_err;
2998
2999 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
3000
3001 switch (state) {
3002 case BNX2X_VFOP_RSS_CONFIG:
3003 /* next state */
3004 vfop->state = BNX2X_VFOP_RSS_DONE;
3005 bnx2x_config_rss(bp, &vfop->op_p->rss);
3006 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3007op_err:
3008 BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
3009op_done:
3010 case BNX2X_VFOP_RSS_DONE:
3011 bnx2x_vfop_end(bp, vf, vfop);
3012 return;
3013 default:
3014 bnx2x_vfop_default(state);
3015 }
3016op_pending:
3017 return;
3018} 2174}
3019 2175
3020int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2176int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
3021 struct bnx2x_virtf *vf, 2177 struct vfpf_tpa_tlv *tlv,
3022 struct bnx2x_vfop_cmd *cmd) 2178 struct bnx2x_queue_update_tpa_params *params)
3023{ 2179{
3024 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2180 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
3025 if (vfop) { 2181 struct bnx2x_queue_state_params qstate;
3026 bnx2x_vfop_opset(-1, /* use vf->state */ 2182 int qid, rc = 0;
3027 bnx2x_vfop_release, cmd->done);
3028 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
3029 cmd->block);
3030 }
3031 return -ENOMEM;
3032}
3033 2183
3034int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 2184 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
3035 struct bnx2x_virtf *vf, 2185
3036 struct bnx2x_vfop_cmd *cmd) 2186 /* Set ramrod params */
3037{ 2187 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
3038 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2188 memcpy(&qstate.params.update_tpa, params,
2189 sizeof(struct bnx2x_queue_update_tpa_params));
2190 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2191 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
3039 2192
3040 if (vfop) { 2193 for (qid = 0; qid < vf_rxq_count(vf); qid++) {
3041 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, 2194 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
3042 cmd->done); 2195 qstate.params.update_tpa.sge_map = sge_addr[qid];
3043 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, 2196 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
3044 cmd->block); 2197 vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2198 U64_LO(sge_addr[qid]));
2199 rc = bnx2x_queue_state_change(bp, &qstate);
2200 if (rc) {
2201 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2202 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2203 vf->abs_vfid, qid);
2204 return rc;
2205 }
3045 } 2206 }
3046 return -ENOMEM; 2207
2208 return rc;
3047} 2209}
3048 2210
3049/* VF release ~ VF close + VF release-resources 2211/* VF release ~ VF close + VF release-resources
3050 * Release is the ultimate SW shutdown and is called whenever an 2212 * Release is the ultimate SW shutdown and is called whenever an
3051 * irrecoverable error is encountered. 2213 * irrecoverable error is encountered.
3052 */ 2214 */
3053void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) 2215int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
3054{ 2216{
3055 struct bnx2x_vfop_cmd cmd = {
3056 .done = NULL,
3057 .block = block,
3058 };
3059 int rc; 2217 int rc;
3060 2218
3061 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2219 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
3062 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2220 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
3063 2221
3064 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 2222 rc = bnx2x_vf_free(bp, vf);
3065 if (rc) 2223 if (rc)
3066 WARN(rc, 2224 WARN(rc,
3067 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2225 "VF[%d] Failed to allocate resources for release op- rc=%d\n",
3068 vf->abs_vfid, rc); 2226 vf->abs_vfid, rc);
2227 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2228 return rc;
3069} 2229}
3070 2230
3071static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2231static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
@@ -3074,16 +2234,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
3074 *sbdf = vf->devfn | (vf->bus << 8); 2234 *sbdf = vf->devfn | (vf->bus << 8);
3075} 2235}
3076 2236
3077static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
3078 struct bnx2x_vf_bar_info *bar_info)
3079{
3080 int n;
3081
3082 bar_info->nr_bars = bp->vfdb->sriov.nres;
3083 for (n = 0; n < bar_info->nr_bars; n++)
3084 bar_info->bars[n] = vf->bars[n];
3085}
3086
3087void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2237void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3088 enum channel_tlvs tlv) 2238 enum channel_tlvs tlv)
3089{ 2239{
@@ -3405,13 +2555,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3405 ivi->spoofchk = 1; /*always enabled */ 2555 ivi->spoofchk = 1; /*always enabled */
3406 if (vf->state == VF_ENABLED) { 2556 if (vf->state == VF_ENABLED) {
3407 /* mac and vlan are in vlan_mac objects */ 2557 /* mac and vlan are in vlan_mac objects */
3408 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) 2558 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
3409 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 2559 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3410 0, ETH_ALEN); 2560 0, ETH_ALEN);
3411 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
3412 vlan_obj->get_n_elements(bp, vlan_obj, 1, 2561 vlan_obj->get_n_elements(bp, vlan_obj, 1,
3413 (u8 *)&ivi->vlan, 0, 2562 (u8 *)&ivi->vlan, 0,
3414 VLAN_HLEN); 2563 VLAN_HLEN);
2564 }
3415 } else { 2565 } else {
3416 /* mac */ 2566 /* mac */
3417 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 2567 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +2635,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3485 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 2635 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3486 /* configure the mac in device on this vf's queue */ 2636 /* configure the mac in device on this vf's queue */
3487 unsigned long ramrod_flags = 0; 2637 unsigned long ramrod_flags = 0;
3488 struct bnx2x_vlan_mac_obj *mac_obj = 2638 struct bnx2x_vlan_mac_obj *mac_obj;
3489 &bnx2x_leading_vfq(vf, mac_obj);
3490 2639
3491 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 2640 /* User should be able to see failure reason in system logs */
3492 if (rc) 2641 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3493 return rc; 2642 return -EINVAL;
3494 2643
3495 /* must lock vfpf channel to protect against vf flows */ 2644 /* must lock vfpf channel to protect against vf flows */
3496 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2645 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3497 2646
3498 /* remove existing eth macs */ 2647 /* remove existing eth macs */
2648 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3499 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 2649 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3500 if (rc) { 2650 if (rc) {
3501 BNX2X_ERR("failed to delete eth macs\n"); 2651 BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +2719,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3569 BNX2X_Q_LOGICAL_STATE_ACTIVE) 2719 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3570 return rc; 2720 return rc;
3571 2721
3572 /* configure the vlan in device on this vf's queue */ 2722 /* User should be able to see error in system logs */
3573 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2723 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3574 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 2724 return -EINVAL;
3575 if (rc)
3576 return rc;
3577 2725
3578 /* must lock vfpf channel to protect against vf flows */ 2726 /* must lock vfpf channel to protect against vf flows */
3579 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2727 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3580 2728
3581 /* remove existing vlans */ 2729 /* remove existing vlans */
3582 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2730 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2731 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3583 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 2732 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3584 &ramrod_flags); 2733 &ramrod_flags);
3585 if (rc) { 2734 if (rc) {
@@ -3736,13 +2885,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
3736 bnx2x_sample_bulletin(bp); 2885 bnx2x_sample_bulletin(bp);
3737 2886
3738 /* if channel is down we need to self destruct */ 2887 /* if channel is down we need to self destruct */
3739 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { 2888 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3740 smp_mb__before_clear_bit(); 2889 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3741 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 2890 BNX2X_MSG_IOV);
3742 &bp->sp_rtnl_state);
3743 smp_mb__after_clear_bit();
3744 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3745 }
3746} 2891}
3747 2892
3748void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 2893void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3756,12 +2901,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3756 mutex_init(&bp->vf2pf_mutex); 2901 mutex_init(&bp->vf2pf_mutex);
3757 2902
3758 /* allocate vf2pf mailbox for vf to pf channel */ 2903 /* allocate vf2pf mailbox for vf to pf channel */
3759 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 2904 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3760 sizeof(struct bnx2x_vf_mbx_msg)); 2905 sizeof(struct bnx2x_vf_mbx_msg));
2906 if (!bp->vf2pf_mbox)
2907 goto alloc_mem_err;
3761 2908
3762 /* allocate pf 2 vf bulletin board */ 2909 /* allocate pf 2 vf bulletin board */
3763 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, 2910 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3764 sizeof(union pf_vf_bulletin)); 2911 sizeof(union pf_vf_bulletin));
2912 if (!bp->pf2vf_bulletin)
2913 goto alloc_mem_err;
3765 2914
3766 return 0; 2915 return 0;
3767 2916
@@ -3792,3 +2941,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
3792 bnx2x_post_vf_bulletin(bp, vf_idx); 2941 bnx2x_post_vf_bulletin(bp, vf_idx);
3793 } 2942 }
3794} 2943}
2944
2945void bnx2x_iov_task(struct work_struct *work)
2946{
2947 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
2948
2949 if (!netif_running(bp->dev))
2950 return;
2951
2952 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
2953 &bp->iov_task_state))
2954 bnx2x_vf_handle_flr_event(bp);
2955
2956 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
2957 &bp->iov_task_state))
2958 bnx2x_vf_mbx(bp);
2959}
2960
2961void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
2962{
2963 smp_mb__before_clear_bit();
2964 set_bit(flag, &bp->iov_task_state);
2965 smp_mb__after_clear_bit();
2966 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
2967 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
2968}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9d..8bf764570eef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
30 30
31#ifdef CONFIG_BNX2X_SRIOV 31#ifdef CONFIG_BNX2X_SRIOV
32 32
33extern struct workqueue_struct *bnx2x_iov_wq;
34
33/* The bnx2x device structure holds vfdb structure described below. 35/* The bnx2x device structure holds vfdb structure described below.
34 * The VF array is indexed by the relative vfid. 36 * The VF array is indexed by the relative vfid.
35 */ 37 */
@@ -83,108 +85,35 @@ struct bnx2x_vf_queue {
83 u16 index; 85 u16 index;
84 u16 sb_idx; 86 u16 sb_idx;
85 bool is_leading; 87 bool is_leading;
88 bool sp_initialized;
86}; 89};
87 90
88/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: 91/* struct bnx2x_vf_queue_construct_params - prepare queue construction
89 * q-init, q-setup and SB index 92 * parameters: q-init, q-setup and SB index
90 */ 93 */
91struct bnx2x_vfop_qctor_params { 94struct bnx2x_vf_queue_construct_params {
92 struct bnx2x_queue_state_params qstate; 95 struct bnx2x_queue_state_params qstate;
93 struct bnx2x_queue_setup_params prep_qsetup; 96 struct bnx2x_queue_setup_params prep_qsetup;
94}; 97};
95 98
96/* VFOP parameters (one copy per VF) */
97union bnx2x_vfop_params {
98 struct bnx2x_vlan_mac_ramrod_params vlan_mac;
99 struct bnx2x_rx_mode_ramrod_params rx_mode;
100 struct bnx2x_mcast_ramrod_params mcast;
101 struct bnx2x_config_rss_params rss;
102 struct bnx2x_vfop_qctor_params qctor;
103};
104
105/* forward */ 99/* forward */
106struct bnx2x_virtf; 100struct bnx2x_virtf;
107 101
108/* VFOP definitions */ 102/* VFOP definitions */
109typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
110
111struct bnx2x_vfop_cmd {
112 vfop_handler_t done;
113 bool block;
114};
115 103
116/* VFOP queue filters command additional arguments */ 104struct bnx2x_vf_mac_vlan_filter {
117struct bnx2x_vfop_filter {
118 struct list_head link;
119 int type; 105 int type;
120#define BNX2X_VFOP_FILTER_MAC 1 106#define BNX2X_VF_FILTER_MAC 1
121#define BNX2X_VFOP_FILTER_VLAN 2 107#define BNX2X_VF_FILTER_VLAN 2
122 108
123 bool add; 109 bool add;
124 u8 *mac; 110 u8 *mac;
125 u16 vid; 111 u16 vid;
126}; 112};
127 113
128struct bnx2x_vfop_filters { 114struct bnx2x_vf_mac_vlan_filters {
129 int add_cnt; 115 int count;
130 struct list_head head; 116 struct bnx2x_vf_mac_vlan_filter filters[];
131 struct bnx2x_vfop_filter filters[];
132};
133
134/* transient list allocated, built and saved until its
135 * passed to the SP-VERBs layer.
136 */
137struct bnx2x_vfop_args_mcast {
138 int mc_num;
139 struct bnx2x_mcast_list_elem *mc;
140};
141
142struct bnx2x_vfop_args_qctor {
143 int qid;
144 u16 sb_idx;
145};
146
147struct bnx2x_vfop_args_qdtor {
148 int qid;
149 struct eth_context *cxt;
150};
151
152struct bnx2x_vfop_args_defvlan {
153 int qid;
154 bool enable;
155 u16 vid;
156 u8 prio;
157};
158
159struct bnx2x_vfop_args_qx {
160 int qid;
161 bool en_add;
162};
163
164struct bnx2x_vfop_args_filters {
165 struct bnx2x_vfop_filters *multi_filter;
166 atomic_t *credit; /* non NULL means 'don't consume credit' */
167};
168
169union bnx2x_vfop_args {
170 struct bnx2x_vfop_args_mcast mc_list;
171 struct bnx2x_vfop_args_qctor qctor;
172 struct bnx2x_vfop_args_qdtor qdtor;
173 struct bnx2x_vfop_args_defvlan defvlan;
174 struct bnx2x_vfop_args_qx qx;
175 struct bnx2x_vfop_args_filters filters;
176};
177
178struct bnx2x_vfop {
179 struct list_head link;
180 int rc; /* return code */
181 int state; /* next state */
182 union bnx2x_vfop_args args; /* extra arguments */
183 union bnx2x_vfop_params *op_p; /* ramrod params */
184
185 /* state machine callbacks */
186 vfop_handler_t transition;
187 vfop_handler_t done;
188}; 117};
189 118
190/* vf context */ 119/* vf context */
@@ -204,15 +133,7 @@ struct bnx2x_virtf {
204#define VF_ENABLED 2 /* VF Enabled */ 133#define VF_ENABLED 2 /* VF Enabled */
205#define VF_RESET 3 /* VF FLR'd, pending cleanup */ 134#define VF_RESET 3 /* VF FLR'd, pending cleanup */
206 135
207 /* non 0 during flr cleanup */ 136 bool flr_clnup_stage; /* true during flr cleanup */
208 u8 flr_clnup_stage;
209#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
210 * sans the end-wait
211 */
212#define VF_FLR_ACK 2 /* ACK flr notification */
213#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
214 * ~ final cleanup' end wait
215 */
216 137
217 /* dma */ 138 /* dma */
218 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ 139 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
@@ -276,11 +197,6 @@ struct bnx2x_virtf {
276 struct bnx2x_rss_config_obj rss_conf_obj; 197 struct bnx2x_rss_config_obj rss_conf_obj;
277 198
278 /* slow-path operations */ 199 /* slow-path operations */
279 atomic_t op_in_progress;
280 int op_rc;
281 bool op_wait_blocking;
282 struct list_head op_list_head;
283 union bnx2x_vfop_params op_params;
284 struct mutex op_mutex; /* one vfop at a time mutex */ 200 struct mutex op_mutex; /* one vfop at a time mutex */
285 enum channel_tlvs op_current; 201 enum channel_tlvs op_current;
286}; 202};
@@ -338,11 +254,6 @@ struct bnx2x_vf_mbx {
338 u32 vf_addr_hi; 254 u32 vf_addr_hi;
339 255
340 struct vfpf_first_tlv first_tlv; /* saved VF request header */ 256 struct vfpf_first_tlv first_tlv; /* saved VF request header */
341
342 u8 flags;
343#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
344 * more then one pending msg
345 */
346}; 257};
347 258
348struct bnx2x_vf_sp { 259struct bnx2x_vf_sp {
@@ -419,6 +330,10 @@ struct bnx2x_vfdb {
419 /* the number of msix vectors belonging to this PF designated for VFs */ 330 /* the number of msix vectors belonging to this PF designated for VFs */
420 u16 vf_sbs_pool; 331 u16 vf_sbs_pool;
421 u16 first_vf_igu_entry; 332 u16 first_vf_igu_entry;
333
334 /* sp_rtnl synchronization */
335 struct mutex event_mutex;
336 u64 event_occur;
422}; 337};
423 338
424/* queue access */ 339/* queue access */
@@ -468,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
468void bnx2x_iov_init_dmae(struct bnx2x *bp); 383void bnx2x_iov_init_dmae(struct bnx2x *bp);
469void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 384void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
470 struct bnx2x_queue_sp_obj **q_obj); 385 struct bnx2x_queue_sp_obj **q_obj);
471void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
472int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); 386int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
473void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); 387void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
474void bnx2x_iov_storm_stats_update(struct bnx2x *bp); 388void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
475void bnx2x_iov_sp_task(struct bnx2x *bp);
476/* global vf mailbox routines */ 389/* global vf mailbox routines */
477void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); 390void bnx2x_vf_mbx(struct bnx2x *bp);
391void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
392 struct vf_pf_event_data *vfpf_event);
478void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); 393void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
479 394
480/* CORE VF API */ 395/* CORE VF API */
@@ -487,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
487int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 402int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
488 dma_addr_t *sb_map); 403 dma_addr_t *sb_map);
489 404
490/* VFOP generic helpers */
491#define bnx2x_vfop_default(state) do { \
492 BNX2X_ERR("Bad state %d\n", (state)); \
493 vfop->rc = -EINVAL; \
494 goto op_err; \
495 } while (0)
496
497enum {
498 VFOP_DONE,
499 VFOP_CONT,
500 VFOP_VERIFY_PEND,
501};
502
503#define bnx2x_vfop_finalize(vf, rc, next) do { \
504 if ((rc) < 0) \
505 goto op_err; \
506 else if ((rc) > 0) \
507 goto op_pending; \
508 else if ((next) == VFOP_DONE) \
509 goto op_done; \
510 else if ((next) == VFOP_VERIFY_PEND) \
511 BNX2X_ERR("expected pending\n"); \
512 else { \
513 DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
514 atomic_set(&vf->op_in_progress, 1); \
515 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
516 return; \
517 } \
518 } while (0)
519
520#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
521 do { \
522 vfop->state = first_state; \
523 vfop->op_p = &vf->op_params; \
524 vfop->transition = trans_hndlr; \
525 vfop->done = done_hndlr; \
526 } while (0)
527
528static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
529 struct bnx2x_virtf *vf)
530{
531 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
532 WARN_ON(list_empty(&vf->op_list_head));
533 return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
534}
535
536static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
537 struct bnx2x_virtf *vf)
538{
539 struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
540
541 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
542 if (vfop) {
543 INIT_LIST_HEAD(&vfop->link);
544 list_add(&vfop->link, &vf->op_list_head);
545 }
546 return vfop;
547}
548
549static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
550 struct bnx2x_vfop *vfop)
551{
552 /* rc < 0 - error, otherwise set to 0 */
553 DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
554 if (vfop->rc >= 0)
555 vfop->rc = 0;
556 DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
557
558 /* unlink the current op context and propagate error code
559 * must be done before invoking the 'done()' handler
560 */
561 WARN(!mutex_is_locked(&vf->op_mutex),
562 "about to access vf op linked list but mutex was not locked!");
563 list_del(&vfop->link);
564
565 if (list_empty(&vf->op_list_head)) {
566 DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
567 vf->op_rc = vfop->rc;
568 DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
569 vf->op_rc, vfop->rc);
570 } else {
571 struct bnx2x_vfop *cur_vfop;
572
573 DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
574 cur_vfop = bnx2x_vfop_cur(bp, vf);
575 cur_vfop->rc = vfop->rc;
576 DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
577 vf->op_rc, vfop->rc);
578 }
579
580 /* invoke done handler */
581 if (vfop->done) {
582 DP(BNX2X_MSG_IOV, "calling done handler\n");
583 vfop->done(bp, vf);
584 } else {
585 /* there is no done handler for the operation to unlock
586 * the mutex. Must have gotten here from PF initiated VF RELEASE
587 */
588 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
589 }
590
591 DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
592 vf->op_rc, vfop->rc);
593
594 /* if this is the last nested op reset the wait_blocking flag
595 * to release any blocking wrappers, only after 'done()' is invoked
596 */
597 if (list_empty(&vf->op_list_head)) {
598 DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
599 vf->op_wait_blocking = false;
600 }
601
602 kfree(vfop);
603}
604
605static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
606 struct bnx2x_virtf *vf)
607{
608 /* can take a while if any port is running */
609 int cnt = 5000;
610
611 might_sleep();
612 while (cnt--) {
613 if (vf->op_wait_blocking == false) {
614#ifdef BNX2X_STOP_ON_ERROR
615 DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
616#endif
617 return 0;
618 }
619 usleep_range(1000, 2000);
620
621 if (bp->panic)
622 return -EIO;
623 }
624
625 /* timeout! */
626#ifdef BNX2X_STOP_ON_ERROR
627 bnx2x_panic();
628#endif
629
630 return -EBUSY;
631}
632
633static inline int bnx2x_vfop_transition(struct bnx2x *bp,
634 struct bnx2x_virtf *vf,
635 vfop_handler_t transition,
636 bool block)
637{
638 if (block)
639 vf->op_wait_blocking = true;
640 transition(bp, vf);
641 if (block)
642 return bnx2x_vfop_wait_blocking(bp, vf);
643 return 0;
644}
645
646/* VFOP queue construction helpers */ 405/* VFOP queue construction helpers */
647void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 406void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
648 struct bnx2x_queue_init_params *init_params, 407 struct bnx2x_queue_init_params *init_params,
@@ -657,59 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
657void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 416void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
658 struct bnx2x_virtf *vf, 417 struct bnx2x_virtf *vf,
659 struct bnx2x_vf_queue *q, 418 struct bnx2x_vf_queue *q,
660 struct bnx2x_vfop_qctor_params *p, 419 struct bnx2x_vf_queue_construct_params *p,
661 unsigned long q_type); 420 unsigned long q_type);
662int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
663 struct bnx2x_virtf *vf,
664 struct bnx2x_vfop_cmd *cmd,
665 struct bnx2x_vfop_filters *macs,
666 int qid, bool drv_only);
667
668int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
669 struct bnx2x_virtf *vf,
670 struct bnx2x_vfop_cmd *cmd,
671 struct bnx2x_vfop_filters *vlans,
672 int qid, bool drv_only);
673
674int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
675 struct bnx2x_virtf *vf,
676 struct bnx2x_vfop_cmd *cmd,
677 int qid);
678
679int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
680 struct bnx2x_virtf *vf,
681 struct bnx2x_vfop_cmd *cmd,
682 int qid);
683
684int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
685 struct bnx2x_virtf *vf,
686 struct bnx2x_vfop_cmd *cmd,
687 bnx2x_mac_addr_t *mcasts,
688 int mcast_num, bool drv_only);
689
690int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
691 struct bnx2x_virtf *vf,
692 struct bnx2x_vfop_cmd *cmd,
693 int qid, unsigned long accept_flags);
694
695int bnx2x_vfop_close_cmd(struct bnx2x *bp,
696 struct bnx2x_virtf *vf,
697 struct bnx2x_vfop_cmd *cmd);
698
699int bnx2x_vfop_release_cmd(struct bnx2x *bp,
700 struct bnx2x_virtf *vf,
701 struct bnx2x_vfop_cmd *cmd);
702 421
703int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 422int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
704 struct bnx2x_virtf *vf, 423 struct bnx2x_vf_mac_vlan_filters *filters,
705 struct bnx2x_vfop_cmd *cmd); 424 int qid, bool drv_only);
425
426int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
427 struct bnx2x_vf_queue_construct_params *qctor);
428
429int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
430
431int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
432 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
433
434int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
435 int qid, unsigned long accept_flags);
436
437int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
438
439int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
440
441int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
442 struct bnx2x_config_rss_params *rss);
443
444int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
445 struct vfpf_tpa_tlv *tlv,
446 struct bnx2x_queue_update_tpa_params *params);
706 447
707/* VF release ~ VF close + VF release-resources 448/* VF release ~ VF close + VF release-resources
708 * 449 *
709 * Release is the ultimate SW shutdown and is called whenever an 450 * Release is the ultimate SW shutdown and is called whenever an
710 * irrecoverable error is encountered. 451 * irrecoverable error is encountered.
711 */ 452 */
712void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); 453int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
713int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); 454int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
714u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); 455u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
715 456
@@ -772,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
772int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 513int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
773void bnx2x_iov_channel_down(struct bnx2x *bp); 514void bnx2x_iov_channel_down(struct bnx2x *bp);
774 515
516void bnx2x_iov_task(struct work_struct *work);
517
518void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
519
775#else /* CONFIG_BNX2X_SRIOV */ 520#else /* CONFIG_BNX2X_SRIOV */
776 521
777static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 522static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
778 struct bnx2x_queue_sp_obj **q_obj) {} 523 struct bnx2x_queue_sp_obj **q_obj) {}
779static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
780 bool queue_work) {}
781static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} 524static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
782static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, 525static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
783 union event_ring_elem *elem) {return 1; } 526 union event_ring_elem *elem) {return 1; }
784static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} 527static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
785static inline void bnx2x_vf_mbx(struct bnx2x *bp, 528static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
786 struct vf_pf_event_data *vfpf_event) {} 529 struct vf_pf_event_data *vfpf_event) {}
787static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } 530static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
788static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} 531static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
789static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } 532static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@@ -830,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
830static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 573static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
831static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} 574static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
832 575
576static inline void bnx2x_iov_task(struct work_struct *work) {}
577static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
578
833#endif /* CONFIG_BNX2X_SRIOV */ 579#endif /* CONFIG_BNX2X_SRIOV */
834#endif /* bnx2x_sriov.h */ 580#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3fa6c2a2a5a9..0622884596b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
548 548
549 vf->leading_rss = cl_id; 549 vf->leading_rss = cl_id;
550 q->is_leading = true; 550 q->is_leading = true;
551 q->sp_initialized = true;
551} 552}
552 553
553/* ask the pf to open a queue for the vf */ 554/* ask the pf to open a queue for the vf */
@@ -672,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
672 673
673out: 674out:
674 bnx2x_vfpf_finalize(bp, &req->first_tlv); 675 bnx2x_vfpf_finalize(bp, &req->first_tlv);
676
675 return rc; 677 return rc;
676} 678}
677 679
@@ -894,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
894 896
895 DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); 897 DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
896 898
897 switch (mode) { 899 /* Ignore everything accept MODE_NONE */
898 case BNX2X_RX_MODE_NONE: /* no Rx */ 900 if (mode == BNX2X_RX_MODE_NONE) {
899 req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; 901 req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
900 break; 902 } else {
901 case BNX2X_RX_MODE_NORMAL: 903 /* Current PF driver will not look at the specific flags,
904 * but they are required when working with older drivers on hv.
905 */
902 req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; 906 req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
903 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; 907 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
904 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; 908 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
905 break;
906 case BNX2X_RX_MODE_ALLMULTI:
907 req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
908 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
909 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
910 break;
911 case BNX2X_RX_MODE_PROMISC:
912 req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
913 req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
914 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
915 break;
916 default:
917 BNX2X_ERR("BAD rx mode (%d)\n", mode);
918 rc = -EINVAL;
919 goto out;
920 } 909 }
921 910
922 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; 911 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -937,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
937 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); 926 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
938 rc = -EINVAL; 927 rc = -EINVAL;
939 } 928 }
940out: 929
941 bnx2x_vfpf_finalize(bp, &req->first_tlv); 930 bnx2x_vfpf_finalize(bp, &req->first_tlv);
942 931
943 return rc; 932 return rc;
@@ -1047,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
1047} 1036}
1048 1037
1049static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, 1038static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1050 struct bnx2x_virtf *vf) 1039 struct bnx2x_virtf *vf,
1040 int vf_rc)
1051{ 1041{
1052 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); 1042 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1053 struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; 1043 struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
@@ -1059,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1059 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", 1049 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
1060 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); 1050 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
1061 1051
1062 resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); 1052 resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
1063 1053
1064 /* send response */ 1054 /* send response */
1065 vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + 1055 vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
@@ -1088,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1088 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1078 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1089 mmiowb(); 1079 mmiowb();
1090 1080
1091 /* initiate dmae to send the response */
1092 mbx->flags &= ~VF_MSG_INPROCESS;
1093
1094 /* copy the response header including status-done field, 1081 /* copy the response header including status-done field,
1095 * must be last dmae, must be after FW is acked 1082 * must be last dmae, must be after FW is acked
1096 */ 1083 */
@@ -1110,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1110 return; 1097 return;
1111 1098
1112mbx_error: 1099mbx_error:
1113 bnx2x_vf_release(bp, vf, false); /* non blocking */ 1100 bnx2x_vf_release(bp, vf);
1114} 1101}
1115 1102
1116static void bnx2x_vf_mbx_resp(struct bnx2x *bp, 1103static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
1117 struct bnx2x_virtf *vf) 1104 struct bnx2x_virtf *vf,
1105 int rc)
1118{ 1106{
1119 bnx2x_vf_mbx_resp_single_tlv(bp, vf); 1107 bnx2x_vf_mbx_resp_single_tlv(bp, vf);
1120 bnx2x_vf_mbx_resp_send_msg(bp, vf); 1108 bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
1121} 1109}
1122 1110
1123static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, 1111static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
@@ -1159,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1159 resp->pfdev_info.db_size = bp->db_size; 1147 resp->pfdev_info.db_size = bp->db_size;
1160 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; 1148 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
1161 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | 1149 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
1162 /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); 1150 PFVF_CAP_TPA |
1151 PFVF_CAP_TPA_UPDATE);
1163 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, 1152 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
1164 sizeof(resp->pfdev_info.fw_ver)); 1153 sizeof(resp->pfdev_info.fw_ver));
1165 1154
@@ -1240,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1240 sizeof(struct channel_list_end_tlv)); 1229 sizeof(struct channel_list_end_tlv));
1241 1230
1242 /* send the response */ 1231 /* send the response */
1243 vf->op_rc = vfop_status; 1232 bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
1244 bnx2x_vf_mbx_resp_send_msg(bp, vf);
1245} 1233}
1246 1234
1247static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 1235static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1273,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1273 struct bnx2x_vf_mbx *mbx) 1261 struct bnx2x_vf_mbx *mbx)
1274{ 1262{
1275 struct vfpf_init_tlv *init = &mbx->msg->req.init; 1263 struct vfpf_init_tlv *init = &mbx->msg->req.init;
1264 int rc;
1276 1265
1277 /* record ghost addresses from vf message */ 1266 /* record ghost addresses from vf message */
1278 vf->spq_map = init->spq_addr; 1267 vf->spq_map = init->spq_addr;
1279 vf->fw_stat_map = init->stats_addr; 1268 vf->fw_stat_map = init->stats_addr;
1280 vf->stats_stride = init->stats_stride; 1269 vf->stats_stride = init->stats_stride;
1281 vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); 1270 rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
1282 1271
1283 /* set VF multiqueue statistics collection mode */ 1272 /* set VF multiqueue statistics collection mode */
1284 if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) 1273 if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
1285 vf->cfg_flags |= VF_CFG_STATS_COALESCE; 1274 vf->cfg_flags |= VF_CFG_STATS_COALESCE;
1286 1275
1287 /* response */ 1276 /* response */
1288 bnx2x_vf_mbx_resp(bp, vf); 1277 bnx2x_vf_mbx_resp(bp, vf, rc);
1289} 1278}
1290 1279
1291/* convert MBX queue-flags to standard SP queue-flags */ 1280/* convert MBX queue-flags to standard SP queue-flags */
@@ -1320,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1320 struct bnx2x_vf_mbx *mbx) 1309 struct bnx2x_vf_mbx *mbx)
1321{ 1310{
1322 struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; 1311 struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
1323 struct bnx2x_vfop_cmd cmd = { 1312 struct bnx2x_vf_queue_construct_params qctor;
1324 .done = bnx2x_vf_mbx_resp, 1313 int rc = 0;
1325 .block = false,
1326 };
1327 1314
1328 /* verify vf_qid */ 1315 /* verify vf_qid */
1329 if (setup_q->vf_qid >= vf_rxq_count(vf)) { 1316 if (setup_q->vf_qid >= vf_rxq_count(vf)) {
1330 BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", 1317 BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
1331 setup_q->vf_qid, vf_rxq_count(vf)); 1318 setup_q->vf_qid, vf_rxq_count(vf));
1332 vf->op_rc = -EINVAL; 1319 rc = -EINVAL;
1333 goto response; 1320 goto response;
1334 } 1321 }
1335 1322
@@ -1347,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1347 bnx2x_leading_vfq_init(bp, vf, q); 1334 bnx2x_leading_vfq_init(bp, vf, q);
1348 1335
1349 /* re-init the VF operation context */ 1336 /* re-init the VF operation context */
1350 memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); 1337 memset(&qctor, 0 ,
1351 setup_p = &vf->op_params.qctor.prep_qsetup; 1338 sizeof(struct bnx2x_vf_queue_construct_params));
1352 init_p = &vf->op_params.qctor.qstate.params.init; 1339 setup_p = &qctor.prep_qsetup;
1340 init_p = &qctor.qstate.params.init;
1353 1341
1354 /* activate immediately */ 1342 /* activate immediately */
1355 __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); 1343 __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@@ -1435,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1435 q->index, q->sb_idx); 1423 q->index, q->sb_idx);
1436 } 1424 }
1437 /* complete the preparations */ 1425 /* complete the preparations */
1438 bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); 1426 bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
1439 1427
1440 vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); 1428 rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
1441 if (vf->op_rc) 1429 if (rc)
1442 goto response; 1430 goto response;
1443 return;
1444 } 1431 }
1445response: 1432response:
1446 bnx2x_vf_mbx_resp(bp, vf); 1433 bnx2x_vf_mbx_resp(bp, vf, rc);
1447} 1434}
1448 1435
1449enum bnx2x_vfop_filters_state {
1450 BNX2X_VFOP_MBX_Q_FILTERS_MACS,
1451 BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
1452 BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
1453 BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
1454 BNX2X_VFOP_MBX_Q_FILTERS_DONE
1455};
1456
1457static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, 1436static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
1458 struct bnx2x_virtf *vf, 1437 struct bnx2x_virtf *vf,
1459 struct vfpf_set_q_filters_tlv *tlv, 1438 struct vfpf_set_q_filters_tlv *tlv,
1460 struct bnx2x_vfop_filters **pfl, 1439 struct bnx2x_vf_mac_vlan_filters **pfl,
1461 u32 type_flag) 1440 u32 type_flag)
1462{ 1441{
1463 int i, j; 1442 int i, j;
1464 struct bnx2x_vfop_filters *fl = NULL; 1443 struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1465 size_t fsz; 1444 size_t fsz;
1466 1445
1467 fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + 1446 fsz = tlv->n_mac_vlan_filters *
1468 sizeof(struct bnx2x_vfop_filters); 1447 sizeof(struct bnx2x_vf_mac_vlan_filter) +
1448 sizeof(struct bnx2x_vf_mac_vlan_filters);
1469 1449
1470 fl = kzalloc(fsz, GFP_KERNEL); 1450 fl = kzalloc(fsz, GFP_KERNEL);
1471 if (!fl) 1451 if (!fl)
1472 return -ENOMEM; 1452 return -ENOMEM;
1473 1453
1474 INIT_LIST_HEAD(&fl->head);
1475
1476 for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { 1454 for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
1477 struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; 1455 struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
1478 1456
@@ -1480,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
1480 continue; 1458 continue;
1481 if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { 1459 if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
1482 fl->filters[j].mac = msg_filter->mac; 1460 fl->filters[j].mac = msg_filter->mac;
1483 fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; 1461 fl->filters[j].type = BNX2X_VF_FILTER_MAC;
1484 } else { 1462 } else {
1485 fl->filters[j].vid = msg_filter->vlan_tag; 1463 fl->filters[j].vid = msg_filter->vlan_tag;
1486 fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; 1464 fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
1487 } 1465 }
1488 fl->filters[j].add = 1466 fl->filters[j].add =
1489 (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? 1467 (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
1490 true : false; 1468 true : false;
1491 list_add_tail(&fl->filters[j++].link, &fl->head); 1469 fl->count++;
1492 } 1470 }
1493 if (list_empty(&fl->head)) 1471 if (!fl->count)
1494 kfree(fl); 1472 kfree(fl);
1495 else 1473 else
1496 *pfl = fl; 1474 *pfl = fl;
@@ -1530,180 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
1530#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID 1508#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
1531#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID 1509#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
1532 1510
1533static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) 1511static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1534{ 1512{
1535 int rc; 1513 int rc = 0;
1536 1514
1537 struct vfpf_set_q_filters_tlv *msg = 1515 struct vfpf_set_q_filters_tlv *msg =
1538 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; 1516 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
1539 1517
1540 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 1518 /* check for any mac/vlan changes */
1541 enum bnx2x_vfop_filters_state state = vfop->state; 1519 if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
1542 1520 /* build mac list */
1543 struct bnx2x_vfop_cmd cmd = { 1521 struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1544 .done = bnx2x_vfop_mbx_qfilters,
1545 .block = false,
1546 };
1547
1548 DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
1549
1550 if (vfop->rc < 0)
1551 goto op_err;
1552 1522
1553 switch (state) { 1523 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1554 case BNX2X_VFOP_MBX_Q_FILTERS_MACS: 1524 VFPF_MAC_FILTER);
1555 /* next state */ 1525 if (rc)
1556 vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS; 1526 goto op_err;
1557 1527
1558 /* check for any vlan/mac changes */ 1528 if (fl) {
1559 if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
1560 /* build mac list */
1561 struct bnx2x_vfop_filters *fl = NULL;
1562 1529
1563 vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, 1530 /* set mac list */
1564 VFPF_MAC_FILTER); 1531 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1565 if (vfop->rc) 1532 msg->vf_qid,
1533 false);
1534 if (rc)
1566 goto op_err; 1535 goto op_err;
1567
1568 if (fl) {
1569 /* set mac list */
1570 rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
1571 msg->vf_qid,
1572 false);
1573 if (rc) {
1574 vfop->rc = rc;
1575 goto op_err;
1576 }
1577 return;
1578 }
1579 } 1536 }
1580 /* fall through */
1581 1537
1582 case BNX2X_VFOP_MBX_Q_FILTERS_VLANS: 1538 /* build vlan list */
1583 /* next state */ 1539 fl = NULL;
1584 vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
1585 1540
1586 /* check for any vlan/mac changes */ 1541 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1587 if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { 1542 VFPF_VLAN_FILTER);
1588 /* build vlan list */ 1543 if (rc)
1589 struct bnx2x_vfop_filters *fl = NULL; 1544 goto op_err;
1590 1545
1591 vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, 1546 if (fl) {
1592 VFPF_VLAN_FILTER); 1547 /* set vlan list */
1593 if (vfop->rc) 1548 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1549 msg->vf_qid,
1550 false);
1551 if (rc)
1594 goto op_err; 1552 goto op_err;
1595
1596 if (fl) {
1597 /* set vlan list */
1598 rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
1599 msg->vf_qid,
1600 false);
1601 if (rc) {
1602 vfop->rc = rc;
1603 goto op_err;
1604 }
1605 return;
1606 }
1607 } 1553 }
1608 /* fall through */ 1554 }
1609
1610 case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
1611 /* next state */
1612 vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
1613
1614 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1615 unsigned long accept = 0;
1616 struct pf_vf_bulletin_content *bulletin =
1617 BP_VF_BULLETIN(bp, vf->index);
1618
1619 /* covert VF-PF if mask to bnx2x accept flags */
1620 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
1621 __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
1622
1623 if (msg->rx_mask &
1624 VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
1625 __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
1626
1627 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
1628 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
1629
1630 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
1631 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
1632 1555
1633 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) 1556 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1634 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); 1557 unsigned long accept = 0;
1558 struct pf_vf_bulletin_content *bulletin =
1559 BP_VF_BULLETIN(bp, vf->index);
1635 1560
1636 /* A packet arriving the vf's mac should be accepted 1561 /* Ignore VF requested mode; instead set a regular mode */
1637 * with any vlan, unless a vlan has already been 1562 if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
1638 * configured. 1563 __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
1639 */ 1564 __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
1640 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) 1565 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1641 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1642
1643 /* set rx-mode */
1644 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
1645 msg->vf_qid, accept);
1646 if (rc) {
1647 vfop->rc = rc;
1648 goto op_err;
1649 }
1650 return;
1651 } 1566 }
1652 /* fall through */
1653
1654 case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
1655 /* next state */
1656 vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
1657
1658 if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
1659 /* set mcasts */
1660 rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
1661 msg->n_multicast, false);
1662 if (rc) {
1663 vfop->rc = rc;
1664 goto op_err;
1665 }
1666 return;
1667 }
1668 /* fall through */
1669op_done:
1670 case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
1671 bnx2x_vfop_end(bp, vf, vfop);
1672 return;
1673op_err:
1674 BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
1675 vf->abs_vfid, msg->vf_qid, vfop->rc);
1676 goto op_done;
1677 1567
1678 default: 1568 /* A packet arriving the vf's mac should be accepted
1679 bnx2x_vfop_default(state); 1569 * with any vlan, unless a vlan has already been
1570 * configured.
1571 */
1572 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
1573 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1574
1575 /* set rx-mode */
1576 rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
1577 if (rc)
1578 goto op_err;
1680 } 1579 }
1681}
1682 1580
1683static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp, 1581 if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
1684 struct bnx2x_virtf *vf, 1582 /* set mcasts */
1685 struct bnx2x_vfop_cmd *cmd) 1583 rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
1686{ 1584 msg->n_multicast, false);
1687 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1585 if (rc)
1688 if (vfop) { 1586 goto op_err;
1689 bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
1690 bnx2x_vfop_mbx_qfilters, cmd->done);
1691 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
1692 cmd->block);
1693 } 1587 }
1694 return -ENOMEM; 1588op_err:
1589 if (rc)
1590 BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
1591 vf->abs_vfid, msg->vf_qid, rc);
1592 return rc;
1695} 1593}
1696 1594
1697static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, 1595static int bnx2x_filters_validate_mac(struct bnx2x *bp,
1698 struct bnx2x_virtf *vf, 1596 struct bnx2x_virtf *vf,
1699 struct bnx2x_vf_mbx *mbx) 1597 struct vfpf_set_q_filters_tlv *filters)
1700{ 1598{
1701 struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1702 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); 1599 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1703 struct bnx2x_vfop_cmd cmd = { 1600 int rc = 0;
1704 .done = bnx2x_vf_mbx_resp,
1705 .block = false,
1706 };
1707 1601
1708 /* if a mac was already set for this VF via the set vf mac ndo, we only 1602 /* if a mac was already set for this VF via the set vf mac ndo, we only
1709 * accept mac configurations of that mac. Why accept them at all? 1603 * accept mac configurations of that mac. Why accept them at all?
@@ -1715,7 +1609,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1715 if (filters->n_mac_vlan_filters > 1) { 1609 if (filters->n_mac_vlan_filters > 1) {
1716 BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", 1610 BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
1717 vf->abs_vfid); 1611 vf->abs_vfid);
1718 vf->op_rc = -EPERM; 1612 rc = -EPERM;
1719 goto response; 1613 goto response;
1720 } 1614 }
1721 1615
@@ -1725,10 +1619,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1725 BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", 1619 BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
1726 vf->abs_vfid); 1620 vf->abs_vfid);
1727 1621
1728 vf->op_rc = -EPERM; 1622 rc = -EPERM;
1729 goto response; 1623 goto response;
1730 } 1624 }
1731 } 1625 }
1626
1627response:
1628 return rc;
1629}
1630
1631static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
1632 struct bnx2x_virtf *vf,
1633 struct vfpf_set_q_filters_tlv *filters)
1634{
1635 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1636 int rc = 0;
1637
1732 /* if vlan was set by hypervisor we don't allow guest to config vlan */ 1638 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1733 if (bulletin->valid_bitmap & 1 << VLAN_VALID) { 1639 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1734 int i; 1640 int i;
@@ -1739,14 +1645,35 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1739 VFPF_Q_FILTER_VLAN_TAG_VALID) { 1645 VFPF_Q_FILTER_VLAN_TAG_VALID) {
1740 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", 1646 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1741 vf->abs_vfid); 1647 vf->abs_vfid);
1742 vf->op_rc = -EPERM; 1648 rc = -EPERM;
1743 goto response; 1649 goto response;
1744 } 1650 }
1745 } 1651 }
1746 } 1652 }
1747 1653
1748 /* verify vf_qid */ 1654 /* verify vf_qid */
1749 if (filters->vf_qid > vf_rxq_count(vf)) 1655 if (filters->vf_qid > vf_rxq_count(vf)) {
1656 rc = -EPERM;
1657 goto response;
1658 }
1659
1660response:
1661 return rc;
1662}
1663
1664static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1665 struct bnx2x_virtf *vf,
1666 struct bnx2x_vf_mbx *mbx)
1667{
1668 struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1669 int rc;
1670
1671 rc = bnx2x_filters_validate_mac(bp, vf, filters);
1672 if (rc)
1673 goto response;
1674
1675 rc = bnx2x_filters_validate_vlan(bp, vf, filters);
1676 if (rc)
1750 goto response; 1677 goto response;
1751 1678
1752 DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", 1679 DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1756,125 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1756 /* print q_filter message */ 1683 /* print q_filter message */
1757 bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); 1684 bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
1758 1685
1759 vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd); 1686 rc = bnx2x_vf_mbx_qfilters(bp, vf);
1760 if (vf->op_rc)
1761 goto response;
1762 return;
1763
1764response: 1687response:
1765 bnx2x_vf_mbx_resp(bp, vf); 1688 bnx2x_vf_mbx_resp(bp, vf, rc);
1766} 1689}
1767 1690
1768static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, 1691static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1769 struct bnx2x_vf_mbx *mbx) 1692 struct bnx2x_vf_mbx *mbx)
1770{ 1693{
1771 int qid = mbx->msg->req.q_op.vf_qid; 1694 int qid = mbx->msg->req.q_op.vf_qid;
1772 struct bnx2x_vfop_cmd cmd = { 1695 int rc;
1773 .done = bnx2x_vf_mbx_resp,
1774 .block = false,
1775 };
1776 1696
1777 DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", 1697 DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
1778 vf->abs_vfid, qid); 1698 vf->abs_vfid, qid);
1779 1699
1780 vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid); 1700 rc = bnx2x_vf_queue_teardown(bp, vf, qid);
1781 if (vf->op_rc) 1701 bnx2x_vf_mbx_resp(bp, vf, rc);
1782 bnx2x_vf_mbx_resp(bp, vf);
1783} 1702}
1784 1703
1785static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, 1704static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1786 struct bnx2x_vf_mbx *mbx) 1705 struct bnx2x_vf_mbx *mbx)
1787{ 1706{
1788 struct bnx2x_vfop_cmd cmd = { 1707 int rc;
1789 .done = bnx2x_vf_mbx_resp,
1790 .block = false,
1791 };
1792 1708
1793 DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); 1709 DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
1794 1710
1795 vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 1711 rc = bnx2x_vf_close(bp, vf);
1796 if (vf->op_rc) 1712 bnx2x_vf_mbx_resp(bp, vf, rc);
1797 bnx2x_vf_mbx_resp(bp, vf);
1798} 1713}
1799 1714
1800static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, 1715static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1801 struct bnx2x_vf_mbx *mbx) 1716 struct bnx2x_vf_mbx *mbx)
1802{ 1717{
1803 struct bnx2x_vfop_cmd cmd = { 1718 int rc;
1804 .done = bnx2x_vf_mbx_resp,
1805 .block = false,
1806 };
1807 1719
1808 DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); 1720 DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
1809 1721
1810 vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 1722 rc = bnx2x_vf_free(bp, vf);
1811 if (vf->op_rc) 1723 bnx2x_vf_mbx_resp(bp, vf, rc);
1812 bnx2x_vf_mbx_resp(bp, vf);
1813} 1724}
1814 1725
1815static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, 1726static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1816 struct bnx2x_vf_mbx *mbx) 1727 struct bnx2x_vf_mbx *mbx)
1817{ 1728{
1818 struct bnx2x_vfop_cmd cmd = { 1729 struct bnx2x_config_rss_params rss;
1819 .done = bnx2x_vf_mbx_resp,
1820 .block = false,
1821 };
1822 struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
1823 struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; 1730 struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
1731 int rc = 0;
1824 1732
1825 if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || 1733 if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
1826 rss_tlv->rss_key_size != T_ETH_RSS_KEY) { 1734 rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
1827 BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", 1735 BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
1828 vf->index); 1736 vf->index);
1829 vf->op_rc = -EINVAL; 1737 rc = -EINVAL;
1830 goto mbx_resp; 1738 goto mbx_resp;
1831 } 1739 }
1832 1740
1741 memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
1742
1833 /* set vfop params according to rss tlv */ 1743 /* set vfop params according to rss tlv */
1834 memcpy(vf_op_params->ind_table, rss_tlv->ind_table, 1744 memcpy(rss.ind_table, rss_tlv->ind_table,
1835 T_ETH_INDIRECTION_TABLE_SIZE); 1745 T_ETH_INDIRECTION_TABLE_SIZE);
1836 memcpy(vf_op_params->rss_key, rss_tlv->rss_key, 1746 memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
1837 sizeof(rss_tlv->rss_key)); 1747 rss.rss_obj = &vf->rss_conf_obj;
1838 vf_op_params->rss_obj = &vf->rss_conf_obj; 1748 rss.rss_result_mask = rss_tlv->rss_result_mask;
1839 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1840 1749
1841 /* flags handled individually for backward/forward compatability */ 1750 /* flags handled individually for backward/forward compatability */
1842 vf_op_params->rss_flags = 0; 1751 rss.rss_flags = 0;
1843 vf_op_params->ramrod_flags = 0; 1752 rss.ramrod_flags = 0;
1844 1753
1845 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) 1754 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1846 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); 1755 __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
1847 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) 1756 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
1848 __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); 1757 __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
1849 if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) 1758 if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
1850 __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); 1759 __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
1851 if (rss_tlv->rss_flags & VFPF_RSS_IPV4) 1760 if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
1852 __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags); 1761 __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
1853 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) 1762 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
1854 __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); 1763 __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
1855 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) 1764 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
1856 __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); 1765 __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
1857 if (rss_tlv->rss_flags & VFPF_RSS_IPV6) 1766 if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
1858 __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); 1767 __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
1859 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) 1768 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
1860 __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); 1769 __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
1861 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) 1770 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
1862 __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); 1771 __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
1863 1772
1864 if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && 1773 if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
1865 rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || 1774 rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
1866 (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && 1775 (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
1867 rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { 1776 rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
1868 BNX2X_ERR("about to hit a FW assert. aborting...\n"); 1777 BNX2X_ERR("about to hit a FW assert. aborting...\n");
1869 vf->op_rc = -EINVAL; 1778 rc = -EINVAL;
1870 goto mbx_resp; 1779 goto mbx_resp;
1871 } 1780 }
1872 1781
1873 vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); 1782 rc = bnx2x_vf_rss_update(bp, vf, &rss);
1783mbx_resp:
1784 bnx2x_vf_mbx_resp(bp, vf, rc);
1785}
1786
1787static int bnx2x_validate_tpa_params(struct bnx2x *bp,
1788 struct vfpf_tpa_tlv *tpa_tlv)
1789{
1790 int rc = 0;
1791
1792 if (tpa_tlv->tpa_client_info.max_sges_for_packet >
1793 U_ETH_MAX_SGES_FOR_PACKET) {
1794 rc = -EINVAL;
1795 BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
1796 tpa_tlv->tpa_client_info.max_sges_for_packet,
1797 U_ETH_MAX_SGES_FOR_PACKET);
1798 }
1799
1800 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
1801 rc = -EINVAL;
1802 BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
1803 tpa_tlv->tpa_client_info.max_tpa_queues,
1804 MAX_AGG_QS(bp));
1805 }
1806
1807 return rc;
1808}
1809
1810static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
1811 struct bnx2x_vf_mbx *mbx)
1812{
1813 struct bnx2x_queue_update_tpa_params vf_op_params;
1814 struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
1815 int rc = 0;
1816
1817 memset(&vf_op_params, 0, sizeof(vf_op_params));
1818
1819 if (bnx2x_validate_tpa_params(bp, tpa_tlv))
1820 goto mbx_resp;
1821
1822 vf_op_params.complete_on_both_clients =
1823 tpa_tlv->tpa_client_info.complete_on_both_clients;
1824 vf_op_params.dont_verify_thr =
1825 tpa_tlv->tpa_client_info.dont_verify_thr;
1826 vf_op_params.max_agg_sz =
1827 tpa_tlv->tpa_client_info.max_agg_size;
1828 vf_op_params.max_sges_pkt =
1829 tpa_tlv->tpa_client_info.max_sges_for_packet;
1830 vf_op_params.max_tpa_queues =
1831 tpa_tlv->tpa_client_info.max_tpa_queues;
1832 vf_op_params.sge_buff_sz =
1833 tpa_tlv->tpa_client_info.sge_buff_size;
1834 vf_op_params.sge_pause_thr_high =
1835 tpa_tlv->tpa_client_info.sge_pause_thr_high;
1836 vf_op_params.sge_pause_thr_low =
1837 tpa_tlv->tpa_client_info.sge_pause_thr_low;
1838 vf_op_params.tpa_mode =
1839 tpa_tlv->tpa_client_info.tpa_mode;
1840 vf_op_params.update_ipv4 =
1841 tpa_tlv->tpa_client_info.update_ipv4;
1842 vf_op_params.update_ipv6 =
1843 tpa_tlv->tpa_client_info.update_ipv6;
1844
1845 rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
1874 1846
1875mbx_resp: 1847mbx_resp:
1876 if (vf->op_rc) 1848 bnx2x_vf_mbx_resp(bp, vf, rc);
1877 bnx2x_vf_mbx_resp(bp, vf);
1878} 1849}
1879 1850
1880/* dispatch request */ 1851/* dispatch request */
@@ -1916,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1916 case CHANNEL_TLV_UPDATE_RSS: 1887 case CHANNEL_TLV_UPDATE_RSS:
1917 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 1888 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1918 return; 1889 return;
1890 case CHANNEL_TLV_UPDATE_TPA:
1891 bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
1892 return;
1919 } 1893 }
1920 1894
1921 } else { 1895 } else {
@@ -1935,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1935 1909
1936 /* can we respond to VF (do we have an address for it?) */ 1910 /* can we respond to VF (do we have an address for it?) */
1937 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { 1911 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1938 /* mbx_resp uses the op_rc of the VF */
1939 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1940
1941 /* notify the VF that we do not support this request */ 1912 /* notify the VF that we do not support this request */
1942 bnx2x_vf_mbx_resp(bp, vf); 1913 bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
1943 } else { 1914 } else {
1944 /* can't send a response since this VF is unknown to us 1915 /* can't send a response since this VF is unknown to us
1945 * just ack the FW to release the mailbox and unlock 1916 * just ack the FW to release the mailbox and unlock
@@ -1952,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1952 } 1923 }
1953} 1924}
1954 1925
1955/* handle new vf-pf message */ 1926void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
1956void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) 1927 struct vf_pf_event_data *vfpf_event)
1957{ 1928{
1958 struct bnx2x_virtf *vf;
1959 struct bnx2x_vf_mbx *mbx;
1960 u8 vf_idx; 1929 u8 vf_idx;
1961 int rc;
1962 1930
1963 DP(BNX2X_MSG_IOV, 1931 DP(BNX2X_MSG_IOV,
1964 "vf pf event received: vfid %d, address_hi %x, address lo %x", 1932 "vf pf event received: vfid %d, address_hi %x, address lo %x",
@@ -1970,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
1970 BNX2X_NR_VIRTFN(bp)) { 1938 BNX2X_NR_VIRTFN(bp)) {
1971 BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", 1939 BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
1972 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); 1940 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
1973 goto mbx_done; 1941 return;
1974 } 1942 }
1943
1975 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); 1944 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
1976 mbx = BP_VF_MBX(bp, vf_idx);
1977 1945
1978 /* verify an event is not currently being processed - 1946 /* Update VFDB with current message and schedule its handling */
1979 * debug failsafe only 1947 mutex_lock(&BP_VFDB(bp)->event_mutex);
1980 */ 1948 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
1981 if (mbx->flags & VF_MSG_INPROCESS) { 1949 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
1982 BNX2X_ERR("Previous message is still being processed, vf_id %d\n", 1950 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
1983 vfpf_event->vf_id); 1951 mutex_unlock(&BP_VFDB(bp)->event_mutex);
1984 goto mbx_done;
1985 }
1986 vf = BP_VF(bp, vf_idx);
1987 1952
1988 /* save the VF message address */ 1953 bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
1989 mbx->vf_addr_hi = vfpf_event->msg_addr_hi; 1954}
1990 mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
1991 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
1992 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
1993 1955
1994 /* dmae to get the VF request */ 1956/* handle new vf-pf messages */
1995 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, 1957void bnx2x_vf_mbx(struct bnx2x *bp)
1996 mbx->vf_addr_hi, mbx->vf_addr_lo, 1958{
1997 sizeof(union vfpf_tlvs)/4); 1959 struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
1998 if (rc) { 1960 u64 events;
1999 BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); 1961 u8 vf_idx;
2000 goto mbx_error; 1962 int rc;
2001 }
2002 1963
2003 /* process the VF message header */ 1964 if (!vfdb)
2004 mbx->first_tlv = mbx->msg->req.first_tlv; 1965 return;
2005 1966
2006 /* Clean response buffer to refrain from falsely seeing chains */ 1967 mutex_lock(&vfdb->event_mutex);
2007 memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); 1968 events = vfdb->event_occur;
1969 vfdb->event_occur = 0;
1970 mutex_unlock(&vfdb->event_mutex);
2008 1971
2009 /* dispatch the request (will prepare the response) */ 1972 for_each_vf(bp, vf_idx) {
2010 bnx2x_vf_mbx_request(bp, vf, mbx); 1973 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
2011 goto mbx_done; 1974 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2012 1975
2013mbx_error: 1976 /* Handle VFs which have pending events */
2014 bnx2x_vf_release(bp, vf, false); /* non blocking */ 1977 if (!(events & (1ULL << vf_idx)))
2015mbx_done: 1978 continue;
2016 return; 1979
1980 DP(BNX2X_MSG_IOV,
1981 "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
1982 vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
1983 mbx->first_tlv.resp_msg_offset);
1984
1985 /* dmae to get the VF request */
1986 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
1987 vf->abs_vfid, mbx->vf_addr_hi,
1988 mbx->vf_addr_lo,
1989 sizeof(union vfpf_tlvs)/4);
1990 if (rc) {
1991 BNX2X_ERR("Failed to copy request VF %d\n",
1992 vf->abs_vfid);
1993 bnx2x_vf_release(bp, vf);
1994 return;
1995 }
1996
1997 /* process the VF message header */
1998 mbx->first_tlv = mbx->msg->req.first_tlv;
1999
2000 /* Clean response buffer to refrain from falsely
2001 * seeing chains.
2002 */
2003 memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
2004
2005 /* dispatch the request (will prepare the response) */
2006 bnx2x_vf_mbx_request(bp, vf, mbx);
2007 }
2017} 2008}
2018 2009
2019/* propagate local bulletin board to vf */ 2010/* propagate local bulletin board to vf */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71..c922b81170e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
162#define PFVF_CAP_RSS 0x00000001 162#define PFVF_CAP_RSS 0x00000001
163#define PFVF_CAP_DHC 0x00000002 163#define PFVF_CAP_DHC 0x00000002
164#define PFVF_CAP_TPA 0x00000004 164#define PFVF_CAP_TPA 0x00000004
165#define PFVF_CAP_TPA_UPDATE 0x00000008
165 char fw_ver[32]; 166 char fw_ver[32];
166 u16 db_size; 167 u16 db_size;
167 u8 indices_per_sb; 168 u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
303 u32 rx_mask; /* see mask constants at the top of the file */ 304 u32 rx_mask; /* see mask constants at the top of the file */
304}; 305};
305 306
307struct vfpf_tpa_tlv {
308 struct vfpf_first_tlv first_tlv;
309
310 struct vf_pf_tpa_client_info {
311 aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
312 u8 update_ipv4;
313 u8 update_ipv6;
314 u8 max_tpa_queues;
315 u8 max_sges_for_packet;
316 u8 complete_on_both_clients;
317 u8 dont_verify_thr;
318 u8 tpa_mode;
319 u16 sge_buff_size;
320 u16 max_agg_size;
321 u16 sge_pause_thr_low;
322 u16 sge_pause_thr_high;
323 } tpa_client_info;
324};
325
306/* close VF (disable VF) */ 326/* close VF (disable VF) */
307struct vfpf_close_tlv { 327struct vfpf_close_tlv {
308 struct vfpf_first_tlv first_tlv; 328 struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
331 struct vfpf_set_q_filters_tlv set_q_filters; 351 struct vfpf_set_q_filters_tlv set_q_filters;
332 struct vfpf_release_tlv release; 352 struct vfpf_release_tlv release;
333 struct vfpf_rss_tlv update_rss; 353 struct vfpf_rss_tlv update_rss;
354 struct vfpf_tpa_tlv update_tpa;
334 struct channel_list_end_tlv list_end; 355 struct channel_list_end_tlv list_end;
335 struct tlv_buffer_size tlv_buf_size; 356 struct tlv_buffer_size tlv_buf_size;
336}; 357};
@@ -405,6 +426,7 @@ enum channel_tlvs {
405 CHANNEL_TLV_PF_SET_VLAN, 426 CHANNEL_TLV_PF_SET_VLAN,
406 CHANNEL_TLV_UPDATE_RSS, 427 CHANNEL_TLV_UPDATE_RSS,
407 CHANNEL_TLV_PHYS_PORT_ID, 428 CHANNEL_TLV_PHYS_PORT_ID,
429 CHANNEL_TLV_UPDATE_TPA,
408 CHANNEL_TLV_MAX 430 CHANNEL_TLV_MAX
409}; 431};
410 432
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 000000000000..31f55a90a197
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_BCMGENET) += genet.o
2genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 000000000000..adf8acbddf56
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2584 @@
1/*
2 * Broadcom GENET (Gigabit Ethernet) controller driver
3 *
4 * Copyright (c) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "bcmgenet: " fmt
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/sched.h>
25#include <linux/types.h>
26#include <linux/fcntl.h>
27#include <linux/interrupt.h>
28#include <linux/string.h>
29#include <linux/if_ether.h>
30#include <linux/init.h>
31#include <linux/errno.h>
32#include <linux/delay.h>
33#include <linux/platform_device.h>
34#include <linux/dma-mapping.h>
35#include <linux/pm.h>
36#include <linux/clk.h>
37#include <linux/version.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_irq.h>
41#include <linux/of_net.h>
42#include <linux/of_platform.h>
43#include <net/arp.h>
44
45#include <linux/mii.h>
46#include <linux/ethtool.h>
47#include <linux/netdevice.h>
48#include <linux/inetdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/skbuff.h>
51#include <linux/in.h>
52#include <linux/ip.h>
53#include <linux/ipv6.h>
54#include <linux/phy.h>
55
56#include <asm/unaligned.h>
57
58#include "bcmgenet.h"
59
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

/* Descriptors left for the default (ring 16) queue after the MQ rings
 * took theirs. NOTE: expands a local `priv` — only valid inside functions
 * that have a `priv` variable in scope.
 */
#define GENET_DEFAULT_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
/* DMA_DESC_SIZE and the REG_OFF macros below also expand a local `priv` */
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
81
/* Program the length/status word of a DMA descriptor.
 * @priv: unused here; kept so all dmadesc_* helpers share one signature
 * @d: iomem pointer to the start of the descriptor
 * @value: combined length/status word
 */
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					  void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}
87
/* Read back the length/status word of a DMA descriptor. */
static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					 void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}
93
/* Program a descriptor's buffer address (low word always, high word
 * only on 40-bit capable hardware built with LPAE support).
 */
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				 void __iomem *d,
				 dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitely configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
109
/* Combined address + length/status setter.
 * NOTE(review): length/status is written before the address words here —
 * confirm whether the hardware requires this order before reordering.
 */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
				void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
117
/* Read back a descriptor's buffer address; mirrors dmadesc_set_addr()
 * including the conditional high-word access on 40-bit/LPAE builds.
 */
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitely configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
135
/* printf format for "major.minor EPHY: 0xNNNN" version strings */
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

/* default netif_msg_* mask for this driver */
#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
140
141static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
142{
143 if (GENET_IS_V1(priv))
144 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
145 else
146 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
147}
148
149static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
150{
151 if (GENET_IS_V1(priv))
152 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
153 else
154 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
155}
156
157/* These macros are defined to deal with register map change
158 * between GENET1.1 and GENET2. Only those currently being used
159 * by driver are defined.
160 */
161static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
162{
163 if (GENET_IS_V1(priv))
164 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
165 else
166 return __raw_readl(priv->base +
167 priv->hw_params->tbuf_offset + TBUF_CTRL);
168}
169
170static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
171{
172 if (GENET_IS_V1(priv))
173 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
174 else
175 __raw_writel(val, priv->base +
176 priv->hw_params->tbuf_offset + TBUF_CTRL);
177}
178
179static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
180{
181 if (GENET_IS_V1(priv))
182 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
183 else
184 return __raw_readl(priv->base +
185 priv->hw_params->tbuf_offset + TBUF_BP_MC);
186}
187
188static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
189{
190 if (GENET_IS_V1(priv))
191 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
192 else
193 __raw_writel(val, priv->base +
194 priv->hw_params->tbuf_offset + TBUF_BP_MC);
195}
196
/* RX/TX DMA register accessors */
/* Logical DMA block registers; per-version offset tables below map each
 * to its actual offset for that hardware generation.
 */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY,
	DMA_RING_PRIORITY,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY]		= 0x30,
	[DMA_RING_PRIORITY]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY]		= 0x34,
	[DMA_RING_PRIORITY]	= 0x3C,
};

/* NOTE: v1 has no DMA_RING_CFG entry, so it implicitly maps to 0x00 —
 * accessors must not use DMA_RING_CFG on v1 hardware.
 */
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY]		= 0x34,
	[DMA_RING_PRIORITY]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
239
/* Map a backing struct device to the driver private state.
 * Assumes drvdata was set to the net_device at probe time —
 * NOTE(review): confirm against the probe path (outside this chunk).
 */
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}
244
/* Read a TX DMA block-level register (not ring-specific); the block
 * register file sits after the 17 per-ring register sets, hence the
 * DMA_RINGS_SIZE displacement.
 */
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
					enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
251
/* Write a TX DMA block-level register; see bcmgenet_tdma_readl() for
 * the address layout.
 */
static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
258
/* Read an RX DMA block-level register (same layout as the TX side,
 * rebased at GENET_RDMA_REG_OFF).
 */
static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
					enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
265
/* Write an RX DMA block-level register */
static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
272
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

/* v1..v3 have no HI registers, so the remaining offsets are packed */
static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
335
/* Read a per-ring TX DMA register; each ring owns a DMA_RING_SIZE
 * register window starting at GENET_TDMA_REG_OFF.
 */
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
344
/* Write a per-ring TX DMA register; see bcmgenet_tdma_ring_readl() */
static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring,
					     u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
354
/* Read a per-ring RX DMA register (RX window rebased at GENET_RDMA_REG_OFF) */
static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
363
/* Write a per-ring RX DMA register */
static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring,
					     u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
373
374static int bcmgenet_get_settings(struct net_device *dev,
375 struct ethtool_cmd *cmd)
376{
377 struct bcmgenet_priv *priv = netdev_priv(dev);
378
379 if (!netif_running(dev))
380 return -EINVAL;
381
382 if (!priv->phydev)
383 return -ENODEV;
384
385 return phy_ethtool_gset(priv->phydev, cmd);
386}
387
388static int bcmgenet_set_settings(struct net_device *dev,
389 struct ethtool_cmd *cmd)
390{
391 struct bcmgenet_priv *priv = netdev_priv(dev);
392
393 if (!netif_running(dev))
394 return -EINVAL;
395
396 if (!priv->phydev)
397 return -ENODEV;
398
399 return phy_ethtool_sset(priv->phydev, cmd);
400}
401
402static int bcmgenet_set_rx_csum(struct net_device *dev,
403 netdev_features_t wanted)
404{
405 struct bcmgenet_priv *priv = netdev_priv(dev);
406 u32 rbuf_chk_ctrl;
407 bool rx_csum_en;
408
409 rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
410
411 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
412
413 /* enable rx checksumming */
414 if (rx_csum_en)
415 rbuf_chk_ctrl |= RBUF_RXCHK_EN;
416 else
417 rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
418 priv->desc_rxchk_en = rx_csum_en;
419
420 /* If UniMAC forwards CRC, we need to skip over it to get
421 * a valid CHK bit to be set in the per-packet status word
422 */
423 if (rx_csum_en && priv->crc_fwd_en)
424 rbuf_chk_ctrl |= RBUF_SKIP_FCS;
425 else
426 rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
427
428 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
429
430 return 0;
431}
432
433static int bcmgenet_set_tx_csum(struct net_device *dev,
434 netdev_features_t wanted)
435{
436 struct bcmgenet_priv *priv = netdev_priv(dev);
437 bool desc_64b_en;
438 u32 tbuf_ctrl, rbuf_ctrl;
439
440 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
441 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
442
443 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
444
445 /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
446 if (desc_64b_en) {
447 tbuf_ctrl |= RBUF_64B_EN;
448 rbuf_ctrl |= RBUF_64B_EN;
449 } else {
450 tbuf_ctrl &= ~RBUF_64B_EN;
451 rbuf_ctrl &= ~RBUF_64B_EN;
452 }
453 priv->desc_64b_en = desc_64b_en;
454
455 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
456 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
457
458 return 0;
459}
460
461static int bcmgenet_set_features(struct net_device *dev,
462 netdev_features_t features)
463{
464 netdev_features_t changed = features ^ dev->features;
465 netdev_features_t wanted = dev->wanted_features;
466 int ret = 0;
467
468 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
469 ret = bcmgenet_set_tx_csum(dev, wanted);
470 if (changed & (NETIF_F_RXCSUM))
471 ret = bcmgenet_set_rx_csum(dev, wanted);
472
473 return ret;
474}
475
/* ethtool get_msglevel: report the netif_msg_* bitmask */
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
482
/* ethtool set_msglevel: store the netif_msg_* bitmask */
static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
489
/* standard ethtool support functions. */
/* Where a given ethtool statistic is sourced from */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,	/* struct net_device_stats field */
	BCMGENET_STAT_MIB_RX,		/* UniMAC RX MIB counter block */
	BCMGENET_STAT_MIB_TX,		/* UniMAC TX MIB counter block */
	BCMGENET_STAT_RUNT,		/* UniMAC RUNT counter block */
	BCMGENET_STAT_MISC,		/* single register, see reg_offset */
};

/* Descriptor tying an ethtool stat name to its backing storage */
struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int stat_sizeof;			/* size of the backing field */
	int stat_offset;			/* offset of the backing field */
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
507
/* Stat backed by a struct net_device_stats field */
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

/* Stat backed by a field of the driver-private MIB mirror */
#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)

/* Stat read from an individual UMAC register at reg_offset */
#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}


/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
539
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters (separated from RSV by a 0xC gap in HW) */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters (separated from TSV by another 0xC gap) */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
627
628static void bcmgenet_get_drvinfo(struct net_device *dev,
629 struct ethtool_drvinfo *info)
630{
631 strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
632 strlcpy(info->version, "v2.0", sizeof(info->version));
633 info->n_stats = BCMGENET_STATS_LEN;
634
635}
636
637static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
638{
639 switch (string_set) {
640 case ETH_SS_STATS:
641 return BCMGENET_STATS_LEN;
642 default:
643 return -EOPNOTSUPP;
644 }
645}
646
647static void bcmgenet_get_strings(struct net_device *dev,
648 u32 stringset, u8 *data)
649{
650 int i;
651
652 switch (stringset) {
653 case ETH_SS_STATS:
654 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
655 memcpy(data + i * ETH_GSTRING_LEN,
656 bcmgenet_gstrings_stats[i].stat_string,
657 ETH_GSTRING_LEN);
658 }
659 break;
660 }
661}
662
663static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
664{
665 int i, j = 0;
666
667 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
668 const struct bcmgenet_stats *s;
669 u8 offset = 0;
670 u32 val = 0;
671 char *p;
672
673 s = &bcmgenet_gstrings_stats[i];
674 switch (s->type) {
675 case BCMGENET_STAT_NETDEV:
676 continue;
677 case BCMGENET_STAT_MIB_RX:
678 case BCMGENET_STAT_MIB_TX:
679 case BCMGENET_STAT_RUNT:
680 if (s->type != BCMGENET_STAT_MIB_RX)
681 offset = BCMGENET_STAT_OFFSET;
682 val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
683 j + offset);
684 break;
685 case BCMGENET_STAT_MISC:
686 val = bcmgenet_umac_readl(priv, s->reg_offset);
687 /* clear if overflowed */
688 if (val == ~0)
689 bcmgenet_umac_writel(priv, 0, s->reg_offset);
690 break;
691 }
692
693 j += s->stat_sizeof;
694 p = (char *)priv + s->stat_offset;
695 *(u32 *)p = val;
696 }
697}
698
699static void bcmgenet_get_ethtool_stats(struct net_device *dev,
700 struct ethtool_stats *stats,
701 u64 *data)
702{
703 struct bcmgenet_priv *priv = netdev_priv(dev);
704 int i;
705
706 if (netif_running(dev))
707 bcmgenet_update_mib_counters(priv);
708
709 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
710 const struct bcmgenet_stats *s;
711 char *p;
712
713 s = &bcmgenet_gstrings_stats[i];
714 if (s->type == BCMGENET_STAT_NETDEV)
715 p = (char *)&dev->stats;
716 else
717 p = (char *)priv;
718 p += s->stat_offset;
719 data[i] = *(u32 *)p;
720 }
721}
722
723/* standard ethtool support functions. */
724static struct ethtool_ops bcmgenet_ethtool_ops = {
725 .get_strings = bcmgenet_get_strings,
726 .get_sset_count = bcmgenet_get_sset_count,
727 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
728 .get_settings = bcmgenet_get_settings,
729 .set_settings = bcmgenet_set_settings,
730 .get_drvinfo = bcmgenet_get_drvinfo,
731 .get_link = ethtool_op_get_link,
732 .get_msglevel = bcmgenet_get_msglevel,
733 .set_msglevel = bcmgenet_set_msglevel,
734};
735
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		/* release the PHY; MDIO access stops here */
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		bcmgenet_mii_reset(priv->dev);
		/* EXT block (when present) gates PHY, DLL and bias power */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		/* other modes need no power-down action */
		break;
	}
}
761
762static void bcmgenet_power_up(struct bcmgenet_priv *priv,
763 enum bcmgenet_power_mode mode)
764{
765 u32 reg;
766
767 if (!(priv->hw_params->flags & GENET_HAS_EXT))
768 return;
769
770 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
771
772 switch (mode) {
773 case GENET_POWER_PASSIVE:
774 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
775 EXT_PWR_DOWN_BIAS);
776 /* fallthrough */
777 case GENET_POWER_CABLE_SENSE:
778 /* enable APD */
779 reg |= EXT_PWR_DN_EN_LD;
780 break;
781 default:
782 break;
783 }
784
785 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
786 bcmgenet_mii_reset(priv->dev);
787}
788
789/* ioctl handle special commands that are not present in ethtool. */
790static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
791{
792 struct bcmgenet_priv *priv = netdev_priv(dev);
793 int val = 0;
794
795 if (!netif_running(dev))
796 return -EINVAL;
797
798 switch (cmd) {
799 case SIOCGMIIPHY:
800 case SIOCGMIIREG:
801 case SIOCSMIIREG:
802 if (!priv->phydev)
803 val = -ENODEV;
804 else
805 val = phy_mii_ioctl(priv->phydev, rq, cmd);
806 break;
807
808 default:
809 val = -EINVAL;
810 break;
811 }
812
813 return val;
814}
815
/* Claim the TX control block at @ring's current write pointer and bind
 * it to the matching hardware descriptor, then advance the write
 * pointer (wrapping back to cb_ptr past end_ptr).  Never returns NULL;
 * the caller is expected to have checked ring->free_bds first.
 */
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
832
/* Simple helper to free a control block's resources: releases the SKB
 * (safe from any context) and clears the recorded DMA address.  The
 * DMA unmap itself is the caller's responsibility.
 */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
840
/* Mask (disable) the default TX ring (16) done interrupts in INTRL2_0 */
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
			INTRL2_CPU_MASK_SET);
}
848
/* Unmask (enable) the default TX ring (16) done interrupts in INTRL2_0 */
static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
			INTRL2_CPU_MASK_CLEAR);
}
856
/* Unmask the per-ring interrupt bit in INTRL2_1 and track it in the
 * software shadow mask (int1_mask)
 */
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv,
			(1 << ring->index), INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}
864
/* Mask the per-ring interrupt bit in INTRL2_1 and track it in the
 * software shadow mask (int1_mask)
 */
static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv,
			(1 << ring->index), INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
872
/* Unlocked version of the reclaim routine; caller must hold ring->lock.
 * Walks the descriptors consumed by hardware since the last call
 * (per TDMA_CONS_INDEX), unmaps their buffers, frees head SKBs,
 * updates stats and wakes the queue if it had been stopped.
 */
static void __bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int last_tx_cn, last_c_index, num_tx_bds;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int c_index;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	txq = netdev_get_tx_queue(dev, ring->queue);

	last_c_index = ring->c_index;
	num_tx_bds = ring->size;

	/* fold the free-running hardware index into the ring
	 * (assumes ring->size is a power of two)
	 */
	c_index &= (num_tx_bds - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		/* consumer index wrapped around the ring */
		last_tx_cn = num_tx_bds - last_c_index + c_index;

	netif_dbg(priv, tx_done, dev,
			"%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
			__func__, ring->index,
			c_index, last_tx_cn, last_c_index);

	/* Reclaim transmitted buffers */
	while (last_tx_cn-- > 0) {
		tx_cb_ptr = ring->cbs + last_c_index;
		if (tx_cb_ptr->skb) {
			/* head descriptor: linear mapping + attached SKB */
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					dma_unmap_addr(tx_cb_ptr, dma_addr),
					tx_cb_ptr->skb->len,
					DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			/* fragment descriptor: page mapping, no SKB */
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
					dma_unmap_addr(tx_cb_ptr, dma_addr),
					dma_unmap_len(tx_cb_ptr, dma_len),
					DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}
		dev->stats.tx_packets++;
		ring->free_bds += 1;

		last_c_index++;
		last_c_index &= (num_tx_bds - 1);
	}

	/* enough room again: no need for the ring-done interrupt */
	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
		ring->int_disable(priv, ring);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);

	ring->c_index = c_index;
}
936
/* Locked wrapper around __bcmgenet_tx_reclaim(); irqsave makes it safe
 * to call from both hard-irq and process context.
 */
static void bcmgenet_tx_reclaim(struct net_device *dev,
		struct bcmgenet_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
946
947static void bcmgenet_tx_reclaim_all(struct net_device *dev)
948{
949 struct bcmgenet_priv *priv = netdev_priv(dev);
950 int i;
951
952 if (netif_is_multiqueue(dev)) {
953 for (i = 0; i < priv->hw_params->tx_queues; i++)
954 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
955 }
956
957 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
958}
959
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 * Maps the linear data, fills the control block and hardware
 * descriptor, and advances the ring's software producer index (the
 * hardware TDMA_PROD_INDEX is written later by the caller).
 * Returns 0 on success or a -errno; on mapping failure the SKB is
 * freed here.
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	/* NOTE(review): maps max(headlen, ETH_ZLEN) bytes starting at
	 * skb->data — when headlen < ETH_ZLEN this relies on the skb
	 * having enough tailroom; verify
	 */
	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	/* NOTE(review): dma_len records skb->len although skb_len
	 * (possibly padded) was mapped — confirm the reclaim path unmaps
	 * with a matching length
	 */
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
1011
1012/* Transmit a SKB fragement */
1013static int bcmgenet_xmit_frag(struct net_device *dev,
1014 skb_frag_t *frag,
1015 u16 dma_desc_flags,
1016 struct bcmgenet_tx_ring *ring)
1017{
1018 struct bcmgenet_priv *priv = netdev_priv(dev);
1019 struct device *kdev = &priv->pdev->dev;
1020 struct enet_cb *tx_cb_ptr;
1021 dma_addr_t mapping;
1022 int ret;
1023
1024 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1025
1026 if (unlikely(!tx_cb_ptr))
1027 BUG();
1028 tx_cb_ptr->skb = NULL;
1029
1030 mapping = skb_frag_dma_map(kdev, frag, 0,
1031 skb_frag_size(frag), DMA_TO_DEVICE);
1032 ret = dma_mapping_error(kdev, mapping);
1033 if (ret) {
1034 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
1035 __func__);
1036 return ret;
1037 }
1038
1039 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1040 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
1041
1042 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1043 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1044 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1045
1046
1047 ring->free_bds -= 1;
1048 ring->prod_index += 1;
1049 ring->prod_index &= DMA_P_INDEX_MASK;
1050
1051 return 0;
1052}
1053
1054/* Reallocate the SKB to put enough headroom in front of it and insert
1055 * the transmit checksum offsets in the descriptors
1056 */
1057static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
1058{
1059 struct status_64 *status = NULL;
1060 struct sk_buff *new_skb;
1061 u16 offset;
1062 u8 ip_proto;
1063 u16 ip_ver;
1064 u32 tx_csum_info;
1065
1066 if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1067 /* If 64 byte status block enabled, must make sure skb has
1068 * enough headroom for us to insert 64B status block.
1069 */
1070 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1071 dev_kfree_skb(skb);
1072 if (!new_skb) {
1073 dev->stats.tx_errors++;
1074 dev->stats.tx_dropped++;
1075 return -ENOMEM;
1076 }
1077 skb = new_skb;
1078 }
1079
1080 skb_push(skb, sizeof(*status));
1081 status = (struct status_64 *)skb->data;
1082
1083 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1084 ip_ver = htons(skb->protocol);
1085 switch (ip_ver) {
1086 case ETH_P_IP:
1087 ip_proto = ip_hdr(skb)->protocol;
1088 break;
1089 case ETH_P_IPV6:
1090 ip_proto = ipv6_hdr(skb)->nexthdr;
1091 break;
1092 default:
1093 return 0;
1094 }
1095
1096 offset = skb_checksum_start_offset(skb) - sizeof(*status);
1097 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
1098 (offset + skb->csum_offset);
1099
1100 /* Set the length valid bit for TCP and UDP and just set
1101 * the special UDP flag for IPv4, else just set to 0.
1102 */
1103 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1104 tx_csum_info |= STATUS_TX_CSUM_LV;
1105 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1106 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
1107 } else
1108 tx_csum_info = 0;
1109
1110 status->tx_csum_info = tx_csum_info;
1111 }
1112
1113 return 0;
1114}
1115
/* ndo_start_xmit: map the queue, reserve descriptors under the ring
 * lock, optionally insert the 64B TX status block, then submit the
 * head and each fragment before kicking the hardware producer index.
 */
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	/* need one descriptor for the head plus one per fragment */
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
				__func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		ret = bcmgenet_put_tx_csum(dev, skb);
		if (ret) {
			/* skb was already freed by the helper; report OK so
			 * the stack does not retry with the same pointer
			 */
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragment */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
				&skb_shinfo(skb)->frags[i],
				(i == nr_frags - 1) ? DMA_EOP : 0, ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
			ring->prod_index, TDMA_PROD_INDEX);

	/* stop the queue (and arm the ring interrupt) when the ring can no
	 * longer hold a maximally fragmented packet
	 */
	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		ring->int_enable(priv, ring);
	}

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
1202
1203
/* Allocate a fresh SKB for @cb, DMA-map it for reception, attach it to
 * the RX descriptor at rx_bd_assign_ptr and advance the assign
 * index/pointer.  Returns 0 or -errno on allocation/mapping failure.
 */
static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
				struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	/* SKB_ALIGNMENT extra bytes leave room for alignment */
	skb = netdev_alloc_skb(priv->dev,
			priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb)
		return -ENOMEM;

	/* a caller did not release this control block */
	WARN_ON(cb->skb != NULL);
	cb->skb = skb;
	mapping = dma_map_single(kdev, skb->data,
			priv->rx_buf_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcmgenet_free_cb(cb);
		netif_err(priv, rx_err, priv->dev,
				"%s DMA map failed\n", __func__);
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	/* assign packet, prepare descriptor, and advance pointer */

	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	/* turn on the newly assigned BD for DMA to use */
	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);

	priv->rx_bd_assign_ptr = priv->rx_bds +
			(priv->rx_bd_assign_index * DMA_DESC_SIZE);

	return 0;
}
1244
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 * Processes up to @budget completed RX descriptors: unmaps each buffer,
 * validates the status flags, pushes good frames to the stack via
 * napi_gro_receive() and refills the consumed control block.
 * Returns the number of descriptors processed.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				unsigned int budget)
{
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len, err;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv,
			DESC_INDEX, RDMA_PROD_INDEX);
	p_index &= DMA_P_INDEX_MASK;

	/* the producer index is free-running; account for wrap-around */
	if (p_index < priv->rx_c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		rxpkttoprocess = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, dev,
		"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
			(rxpktprocessed < budget)) {

		/* Unmap the packet contents such that we can use the
		 * RSV from the 64 bytes descriptor when enabled and save
		 * a 32-bits register read
		 */
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;
		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				priv->rx_buf_len, DMA_FROM_DEVICE);

		if (!priv->desc_64b_en) {
			dma_length_status = dmadesc_get_length_status(priv,
							priv->rx_bds +
							(priv->rx_read_ptr *
							 DMA_DESC_SIZE));
		} else {
			struct status_64 *status;
			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			"%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			__func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
			dma_length_status);

		rxpktprocessed++;

		priv->rx_read_ptr++;
		priv->rx_read_ptr &= (priv->num_rx_bds - 1);

		/* out of memory, just drop packets at the hardware level */
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto refill;
		}

		/* a frame must fit in one buffer: SOP and EOP both set */
		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				"Droping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		}
		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
					(unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;

			/* discard the packet and advance consumer index.*/
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
				priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			/* strip the prepended 64B hardware status block */
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		cb->skb = NULL;
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

		/* refill RX path on the current control block */
refill:
		err = bcmgenet_rx_refill(priv, cb);
		if (err)
			netif_err(priv, rx_err, dev, "Rx refill failed\n");
	}

	return rxpktprocessed;
}
1396
/* Assign skb to RX DMA descriptor: give every empty RX control block a
 * freshly mapped SKB via bcmgenet_rx_refill().
 * Returns 0 on success or the first refill error.
 */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		/* NOTE(review): when cb already holds an skb we continue
		 * without advancing rx_bd_assign_index/ptr, so the same cb
		 * is examined again next iteration — verify this is only
		 * reached when every cb is already populated
		 */
		if (cb->skb)
			continue;

		/* set the DMA descriptor length once and for all
		 * it will only change if we support dynamically sizing
		 * priv->rx_buf_len, but we do not
		 */
		dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
				priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;

	}

	return ret;
}
1427
1428static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1429{
1430 struct enet_cb *cb;
1431 int i;
1432
1433 for (i = 0; i < priv->num_rx_bds; i++) {
1434 cb = &priv->rx_cbs[i];
1435
1436 if (dma_unmap_addr(cb, dma_addr)) {
1437 dma_unmap_single(&priv->dev->dev,
1438 dma_unmap_addr(cb, dma_addr),
1439 priv->rx_buf_len, DMA_FROM_DEVICE);
1440 dma_unmap_addr_set(cb, dma_addr, 0);
1441 }
1442
1443 if (cb->skb)
1444 bcmgenet_free_cb(cb);
1445 }
1446}
1447
1448static int reset_umac(struct bcmgenet_priv *priv)
1449{
1450 struct device *kdev = &priv->pdev->dev;
1451 unsigned int timeout = 0;
1452 u32 reg;
1453
1454 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
1455 bcmgenet_rbuf_ctrl_set(priv, 0);
1456 udelay(10);
1457
1458 /* disable MAC while updating its registers */
1459 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1460
1461 /* issue soft reset, wait for it to complete */
1462 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1463 while (timeout++ < 1000) {
1464 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1465 if (!(reg & CMD_SW_RESET))
1466 return 0;
1467
1468 udelay(1);
1469 }
1470
1471 if (timeout == 1000) {
1472 dev_err(kdev,
1473 "timeout waiting for MAC to come out of resetn\n");
1474 return -ETIMEDOUT;
1475 }
1476
1477 return 0;
1478}
1479
/* Reset and configure the UniMAC: clear MIB counters, set the maximum
 * frame length, enable 2B IP alignment, mask all INTRL2_0 interrupts
 * and then unmask the ones this driver services (RXDMA_BDONE, link
 * events depending on PHY type, MDIO completion on GENET v3+).
 * Returns 0 or the error from reset_umac().
 */
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
		MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	/* always wanted: RX buffer-done */
	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev))
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	else if (priv->ext_phy)
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
		INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
1546
/* Initialize all house-keeping variables for a TX ring, along
 * with corresponding hardware registers
 * @index: ring number (DESC_INDEX selects the default ring 16)
 * @size: number of descriptors in the ring
 * @write_ptr: first descriptor/control-block index owned by this ring
 * @end_ptr: one past the last descriptor index owned by this ring
 */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int write_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;
	unsigned int first_bd;

	spin_lock_init(&ring->lock);
	ring->index = index;
	if (index == DESC_INDEX) {
		/* default ring interrupts live in INTRL2_0 */
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		/* priority rings use per-ring bits in INTRL2_1 */
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + write_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = write_ptr;
	ring->cb_ptr = write_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
			TDMA_FLOW_PERIOD);
	/* Unclassified traffic goes to ring 16 */
	bcmgenet_tdma_ring_writel(priv, index,
			((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
			DMA_RING_BUF_SIZE);

	first_bd = write_ptr;

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
			DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
			TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd,
			TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
			DMA_END_ADDR);
}
1606
1607/* Initialize a RDMA ring */
1608static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
1609 unsigned int index, unsigned int size)
1610{
1611 u32 words_per_bd = WORDS_PER_BD(priv);
1612 int ret;
1613
1614 priv->num_rx_bds = TOTAL_DESC;
1615 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
1616 priv->rx_bd_assign_ptr = priv->rx_bds;
1617 priv->rx_bd_assign_index = 0;
1618 priv->rx_c_index = 0;
1619 priv->rx_read_ptr = 0;
1620 priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
1621 GFP_KERNEL);
1622 if (!priv->rx_cbs)
1623 return -ENOMEM;
1624
1625 ret = bcmgenet_alloc_rx_buffers(priv);
1626 if (ret) {
1627 kfree(priv->rx_cbs);
1628 return ret;
1629 }
1630
1631 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
1632 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
1633 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
1634 bcmgenet_rdma_ring_writel(priv, index,
1635 ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
1636 DMA_RING_BUF_SIZE);
1637 bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
1638 bcmgenet_rdma_ring_writel(priv, index,
1639 words_per_bd * size - 1, DMA_END_ADDR);
1640 bcmgenet_rdma_ring_writel(priv, index,
1641 (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
1642 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
1643 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
1644
1645 return ret;
1646}
1647
/* init multi xmit queues, only available for GENET2+
 * the queue is partitioned as follows:
 *
 * queue 0 - 3 is priority based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 * descriptors: 256 - (number of tx queues * bds per queues) = 128
 * descriptors.
 *
 * The transmit control block pool is then partitioned as following
 * (per the ring setup below: ring i gets cbs [i*bds_cnt, (i+1)*bds_cnt),
 * ring 16 gets the remainder — see bcmgenet_init_dma()):
 * - tx_ring_cbs[0] points to tx_cbs[0..31]
 * - tx_ring_cbs[1] points to tx_cbs[32..63]
 * - tx_ring_cbs[2] points to tx_cbs[64..95]
 * - tx_ring_cbs[3] points to tx_cbs[96..127]
 * - tx_cbs[128..255] are for queue 16
 */
static void bcmgenet_init_multiq(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;

	if (!netif_is_multiqueue(dev)) {
		netdev_warn(dev, "called with non multi queue aware HW\n");
		return;
	}

	/* pause TX DMA while the rings are reprogrammed */
	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		/* ring i owns tx_cbs[i*bds_cnt .. (i+1)*bds_cnt - 1];
		 * the default queue (ring 16) uses the cbs after them
		 */
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
					i * priv->hw_params->bds_cnt,
					(i + 1) * priv->hw_params->bds_cnt);

		/* Configure ring as descriptor ring and setup priority */
		ring_cfg |= 1 << i;
		dma_priority |= ((GENET_Q0_PRIORITY + i) <<
				(GENET_MAX_MQ_CNT + 1) * i);
		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
	}

	/* Enable rings */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
	reg |= ring_cfg;
	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);

	/* Use configured rings priority and set ring #16 priority */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
	reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
	reg |= dma_priority;
	/* NOTE(review): this read-modify-write reads DMA_RING_PRIORITY
	 * but writes DMA_PRIORITY — confirm both names resolve to the
	 * same register offset, otherwise the wrong register is updated
	 */
	bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	if (dma_enable)
		reg |= DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
1717
1718static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1719{
1720 int i;
1721
1722 /* disable DMA */
1723 bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
1724 bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
1725
1726 for (i = 0; i < priv->num_tx_bds; i++) {
1727 if (priv->tx_cbs[i].skb != NULL) {
1728 dev_kfree_skb(priv->tx_cbs[i].skb);
1729 priv->tx_cbs[i].skb = NULL;
1730 }
1731 }
1732
1733 bcmgenet_free_rx_buffers(priv);
1734 kfree(priv->rx_cbs);
1735 kfree(priv->tx_cbs);
1736}
1737
1738/* init_edma: Initialize DMA control register */
1739static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1740{
1741 int ret;
1742
1743 netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
1744
1745 /* by default, enable ring 16 (descriptor based) */
1746 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
1747 if (ret) {
1748 netdev_err(priv->dev, "failed to initialize RX ring\n");
1749 return ret;
1750 }
1751
1752 /* init rDma */
1753 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
1754
1755 /* Init tDma */
1756 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
1757
1758 /* Initialize commont TX ring structures */
1759 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
1760 priv->num_tx_bds = TOTAL_DESC;
1761 priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
1762 GFP_KERNEL);
1763 if (!priv->tx_cbs) {
1764 bcmgenet_fini_dma(priv);
1765 return -ENOMEM;
1766 }
1767
1768 /* initialize multi xmit queue */
1769 bcmgenet_init_multiq(priv->dev);
1770
1771 /* initialize special ring 16 */
1772 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
1773 priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
1774 TOTAL_DESC);
1775
1776 return 0;
1777}
1778
/* NAPI polling method: reclaims default-ring TX work, processes up to
 * @budget RX packets, advances the RDMA consumer index and, once the
 * ring is drained, leaves polling mode and unmasks RXDMA_BDONE.
 */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	/* tx reclaim */
	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advancing our consumer index*/
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv,
			UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
1804
/* Interrupt bottom half: handles events deferred from the hard IRQ,
 * currently only the PHY link up/down notification.
 */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
		(priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
			priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
	}
}
1821
1822/* bcmgenet_isr1: interrupt handler for ring buffer. */
1823static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
1824{
1825 struct bcmgenet_priv *priv = dev_id;
1826 unsigned int index;
1827
1828 /* Save irq status for bottom-half processing. */
1829 priv->irq1_stat =
1830 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
1831 ~priv->int1_mask;
1832 /* clear inerrupts*/
1833 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
1834
1835 netif_dbg(priv, intr, priv->dev,
1836 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
1837 /* Check the MBDONE interrupts.
1838 * packet is done, reclaim descriptors
1839 */
1840 if (priv->irq1_stat & 0x0000ffff) {
1841 index = 0;
1842 for (index = 0; index < 16; index++) {
1843 if (priv->irq1_stat & (1 << index))
1844 bcmgenet_tx_reclaim(priv->dev,
1845 &priv->tx_rings[index]);
1846 }
1847 }
1848 return IRQ_HANDLED;
1849}
1850
/* bcmgenet_isr0: Handle the miscellaneous (non-ring) interrupts.
 *
 * Reads and acknowledges the INTRL2_0 status, then dispatches:
 *  - RX buffer/packet done: mask RXDMA_BDONE and schedule NAPI
 *    (re-enabled in bcmgenet_poll());
 *  - TX buffer/packet done: reclaim the default (ring 16) TX ring;
 *  - PHY detect / link / HFB / MPD events: deferred to the
 *    bcmgenet_irq_task() workqueue bottom half;
 *  - MDIO done/error (GENET_HAS_MDIO_INTR only): wake MDIO waiters on
 *    priv->wq.
 */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		"IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI(software interrupt throttling, if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv,
				UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
		(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim */
		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
				UMAC_IRQ_PHY_DET_F |
				UMAC_IRQ_LINK_UP |
				UMAC_IRQ_LINK_DOWN |
				UMAC_IRQ_HFB_SM |
				UMAC_IRQ_HFB_MM |
				UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
		priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
1901
1902static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
1903{
1904 u32 reg;
1905
1906 reg = bcmgenet_rbuf_ctrl_get(priv);
1907 reg |= BIT(1);
1908 bcmgenet_rbuf_ctrl_set(priv, reg);
1909 udelay(10);
1910
1911 reg &= ~BIT(1);
1912 bcmgenet_rbuf_ctrl_set(priv, reg);
1913 udelay(10);
1914}
1915
1916static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
1917 unsigned char *addr)
1918{
1919 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
1920 (addr[2] << 8) | addr[3], UMAC_MAC0);
1921 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
1922}
1923
1924static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
1925{
1926 int ret;
1927
1928 /* From WOL-enabled suspend, switch to regular clock */
1929 clk_disable(priv->clk_wol);
1930 /* init umac registers to synchronize s/w with h/w */
1931 ret = init_umac(priv);
1932 if (ret)
1933 return ret;
1934
1935 phy_init_hw(priv->phydev);
1936 /* Speed settings must be restored */
1937 bcmgenet_mii_config(priv->dev);
1938
1939 return 0;
1940}
1941
1942/* Returns a reusable dma control register value */
1943static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
1944{
1945 u32 reg;
1946 u32 dma_ctrl;
1947
1948 /* disable DMA */
1949 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
1950 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1951 reg &= ~dma_ctrl;
1952 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1953
1954 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1955 reg &= ~dma_ctrl;
1956 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1957
1958 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
1959 udelay(10);
1960 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
1961
1962 return dma_ctrl;
1963}
1964
1965static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
1966{
1967 u32 reg;
1968
1969 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1970 reg |= dma_ctrl;
1971 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1972
1973 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1974 reg |= dma_ctrl;
1975 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1976}
1977
1978static int bcmgenet_open(struct net_device *dev)
1979{
1980 struct bcmgenet_priv *priv = netdev_priv(dev);
1981 unsigned long dma_ctrl;
1982 u32 reg;
1983 int ret;
1984
1985 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
1986
1987 /* Turn on the clock */
1988 if (!IS_ERR(priv->clk))
1989 clk_prepare_enable(priv->clk);
1990
1991 /* take MAC out of reset */
1992 bcmgenet_umac_reset(priv);
1993
1994 ret = init_umac(priv);
1995 if (ret)
1996 goto err_clk_disable;
1997
1998 /* disable ethernet MAC while updating its registers */
1999 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2000 reg &= ~(CMD_TX_EN | CMD_RX_EN);
2001 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2002
2003 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2004
2005 if (priv->wol_enabled) {
2006 ret = bcmgenet_wol_resume(priv);
2007 if (ret)
2008 return ret;
2009 }
2010
2011 if (phy_is_internal(priv->phydev)) {
2012 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2013 reg |= EXT_ENERGY_DET_MASK;
2014 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2015 }
2016
2017 /* Disable RX/TX DMA and flush TX queues */
2018 dma_ctrl = bcmgenet_dma_disable(priv);
2019
2020 /* Reinitialize TDMA and RDMA and SW housekeeping */
2021 ret = bcmgenet_init_dma(priv);
2022 if (ret) {
2023 netdev_err(dev, "failed to initialize DMA\n");
2024 goto err_fini_dma;
2025 }
2026
2027 /* Always enable ring 16 - descriptor ring */
2028 bcmgenet_enable_dma(priv, dma_ctrl);
2029
2030 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2031 dev->name, priv);
2032 if (ret < 0) {
2033 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2034 goto err_fini_dma;
2035 }
2036
2037 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2038 dev->name, priv);
2039 if (ret < 0) {
2040 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2041 goto err_irq0;
2042 }
2043
2044 /* Start the network engine */
2045 napi_enable(&priv->napi);
2046
2047 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2048 reg |= (CMD_TX_EN | CMD_RX_EN);
2049 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2050
2051 /* Make sure we reflect the value of CRC_CMD_FWD */
2052 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2053
2054 device_set_wakeup_capable(&dev->dev, 1);
2055
2056 if (phy_is_internal(priv->phydev))
2057 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2058
2059 netif_tx_start_all_queues(dev);
2060
2061 phy_start(priv->phydev);
2062
2063 return 0;
2064
2065err_irq0:
2066 free_irq(priv->irq0, dev);
2067err_fini_dma:
2068 bcmgenet_fini_dma(priv);
2069err_clk_disable:
2070 if (!IS_ERR(priv->clk))
2071 clk_disable_unprepare(priv->clk);
2072 return ret;
2073}
2074
2075static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2076{
2077 int ret = 0;
2078 int timeout = 0;
2079 u32 reg;
2080
2081 /* Disable TDMA to stop add more frames in TX DMA */
2082 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2083 reg &= ~DMA_EN;
2084 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2085
2086 /* Check TDMA status register to confirm TDMA is disabled */
2087 while (timeout++ < DMA_TIMEOUT_VAL) {
2088 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2089 if (reg & DMA_DISABLED)
2090 break;
2091
2092 udelay(1);
2093 }
2094
2095 if (timeout == DMA_TIMEOUT_VAL) {
2096 netdev_warn(priv->dev,
2097 "Timed out while disabling TX DMA\n");
2098 ret = -ETIMEDOUT;
2099 }
2100
2101 /* Wait 10ms for packet drain in both tx and rx dma */
2102 usleep_range(10000, 20000);
2103
2104 /* Disable RDMA */
2105 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2106 reg &= ~DMA_EN;
2107 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2108
2109 timeout = 0;
2110 /* Check RDMA status register to confirm RDMA is disabled */
2111 while (timeout++ < DMA_TIMEOUT_VAL) {
2112 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2113 if (reg & DMA_DISABLED)
2114 break;
2115
2116 udelay(1);
2117 }
2118
2119 if (timeout == DMA_TIMEOUT_VAL) {
2120 netdev_warn(priv->dev,
2121 "Timed out while disabling RX DMA\n");
2122 ret = -ETIMEDOUT;
2123 }
2124
2125 return ret;
2126}
2127
/* ndo_stop implementation: bring the interface down.
 *
 * Teardown order matters and mirrors bcmgenet_open() in reverse:
 * stop the PHY, disable MAC RX, stop the TX queues, tear down DMA,
 * disable MAC TX (only safe after TX DMA is stopped), disable NAPI,
 * reclaim in-flight TX skbs, free DMA state and IRQs, flush deferred
 * work, then power down the internal PHY and gate the clocks.
 */
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;
	u32 reg;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	phy_stop(priv->phydev);

	/* Disable MAC receive */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_RX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	netif_tx_stop_all_queues(dev);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_TX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	napi_disable(&priv->napi);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	/* Wait for pending work items to complete - we are stopping
	 * the clock now. Since interrupts are disabled, no new work
	 * will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (priv->wol_enabled)
		clk_enable(priv->clk_wol);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
2180
2181static void bcmgenet_timeout(struct net_device *dev)
2182{
2183 struct bcmgenet_priv *priv = netdev_priv(dev);
2184
2185 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
2186
2187 dev->trans_start = jiffies;
2188
2189 dev->stats.tx_errors++;
2190
2191 netif_tx_wake_all_queues(dev);
2192}
2193
2194#define MAX_MC_COUNT 16
2195
2196static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
2197 unsigned char *addr,
2198 int *i,
2199 int *mc)
2200{
2201 u32 reg;
2202
2203 bcmgenet_umac_writel(priv,
2204 addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
2205 bcmgenet_umac_writel(priv,
2206 addr[2] << 24 | addr[3] << 16 |
2207 addr[4] << 8 | addr[5],
2208 UMAC_MDF_ADDR + ((*i + 1) * 4));
2209 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
2210 reg |= (1 << (MAX_MC_COUNT - *mc));
2211 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
2212 *i += 2;
2213 (*mc)++;
2214}
2215
/* ndo_set_rx_mode implementation: program promiscuous mode and the MDF
 * (MAC destination filter) table.
 *
 * In promiscuous mode the MDF table is disabled entirely.  Otherwise the
 * table is filled in order with the broadcast address, the device's own
 * address, the unicast list and the multicast list.  If either list would
 * overflow the MAX_MC_COUNT filter slots, the function bails out early,
 * leaving the already-programmed entries in place.
 */
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		/* Disable all MDF filters while promiscuous */
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;	/* word offset into the MDF address table */
	mc = 0;	/* number of filter slots consumed so far */
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list*/
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
2264
2265/* Set the hardware MAC address. */
2266static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
2267{
2268 struct sockaddr *addr = p;
2269
2270 /* Setting the MAC address at the hardware level is not possible
2271 * without disabling the UniMAC RX/TX enable bits.
2272 */
2273 if (netif_running(dev))
2274 return -EBUSY;
2275
2276 ether_addr_copy(dev->dev_addr, addr->sa_data);
2277
2278 return 0;
2279}
2280
/* net_device callbacks for the GENET interface */
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};
2291
/* Array of GENET hardware parameters/characteristics, indexed by
 * enum bcmgenet_version (GENET_V1..GENET_V4).  Offsets are register block
 * offsets from the base; bds_cnt is buffer descriptors per priority queue
 * (GENET_V1 has no priority queues, only the ring-16 descriptor ring).
 */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.rx_queues = 0,
		.bds_cnt = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		/* V4 descriptors carry a third word for the upper 32 bits of
		 * the buffer physical address (GENET_HAS_40BITS)
		 */
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};
2356
/* Infer hardware parameters from the detected GENET version.
 *
 * priv->version was already set from the device-tree match data; based on
 * it, this selects the version-specific DMA register layouts and the entry
 * of bcmgenet_hw_params[] to use.  The version is then cross-checked
 * against the major revision read from SYS_REV_CTRL (mismatches are logged
 * but not fatal), and the chosen configuration is printed for debugging.
 */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	/* Hardware major 5 reports as GENET_V4, major 0 as GENET_V1 */
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		major, (reg >> 16) & 0x0f, reg & 0xffff);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		"TXq: %1d, RXq: %1d, BDs: %1d\n"
		"BP << en: %2d, BP msk: 0x%05x\n"
		"HFB count: %2d, QTAQ msk: 0x%05x\n"
		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		"RDMA: 0x%05x, TDMA: 0x%05x\n"
		"Words/BD: %d\n",
		priv->version,
		params->tx_queues, params->rx_queues, params->bds_cnt,
		params->bp_in_en_shift, params->bp_in_mask,
		params->hfb_filter_cnt, params->qtag_mask,
		params->tbuf_offset, params->hfb_offset,
		params->hfb_reg_offset,
		params->rdma_offset, params->tdma_offset,
		params->words_per_bd);
}
2428
2429static const struct of_device_id bcmgenet_match[] = {
2430 { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
2431 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
2432 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
2433 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
2434 { },
2435};
2436
2437static int bcmgenet_probe(struct platform_device *pdev)
2438{
2439 struct device_node *dn = pdev->dev.of_node;
2440 const struct of_device_id *of_id;
2441 struct bcmgenet_priv *priv;
2442 struct net_device *dev;
2443 const void *macaddr;
2444 struct resource *r;
2445 int err = -EIO;
2446
2447 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
2448 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
2449 if (!dev) {
2450 dev_err(&pdev->dev, "can't allocate net device\n");
2451 return -ENOMEM;
2452 }
2453
2454 of_id = of_match_node(bcmgenet_match, dn);
2455 if (!of_id)
2456 return -EINVAL;
2457
2458 priv = netdev_priv(dev);
2459 priv->irq0 = platform_get_irq(pdev, 0);
2460 priv->irq1 = platform_get_irq(pdev, 1);
2461 if (!priv->irq0 || !priv->irq1) {
2462 dev_err(&pdev->dev, "can't find IRQs\n");
2463 err = -EINVAL;
2464 goto err;
2465 }
2466
2467 macaddr = of_get_mac_address(dn);
2468 if (!macaddr) {
2469 dev_err(&pdev->dev, "can't find MAC address\n");
2470 err = -EINVAL;
2471 goto err;
2472 }
2473
2474 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2475 priv->base = devm_ioremap_resource(&pdev->dev, r);
2476 if (IS_ERR(priv->base)) {
2477 err = PTR_ERR(priv->base);
2478 goto err;
2479 }
2480
2481 SET_NETDEV_DEV(dev, &pdev->dev);
2482 dev_set_drvdata(&pdev->dev, dev);
2483 ether_addr_copy(dev->dev_addr, macaddr);
2484 dev->watchdog_timeo = 2 * HZ;
2485 SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
2486 dev->netdev_ops = &bcmgenet_netdev_ops;
2487 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
2488
2489 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
2490
2491 /* Set hardware features */
2492 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
2493 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
2494
2495 /* Set the needed headroom to account for any possible
2496 * features enabling/disabling at runtime
2497 */
2498 dev->needed_headroom += 64;
2499
2500 netdev_boot_setup_check(dev);
2501
2502 priv->dev = dev;
2503 priv->pdev = pdev;
2504 priv->version = (enum bcmgenet_version)of_id->data;
2505
2506 bcmgenet_set_hw_params(priv);
2507
2508 /* Mii wait queue */
2509 init_waitqueue_head(&priv->wq);
2510 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
2511 priv->rx_buf_len = RX_BUF_LENGTH;
2512 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
2513
2514 priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
2515 if (IS_ERR(priv->clk))
2516 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
2517
2518 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
2519 if (IS_ERR(priv->clk_wol))
2520 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
2521
2522 if (!IS_ERR(priv->clk))
2523 clk_prepare_enable(priv->clk);
2524
2525 err = reset_umac(priv);
2526 if (err)
2527 goto err_clk_disable;
2528
2529 err = bcmgenet_mii_init(dev);
2530 if (err)
2531 goto err_clk_disable;
2532
2533 /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
2534 * just the ring 16 descriptor based TX
2535 */
2536 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2537 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2538
2539 err = register_netdev(dev);
2540 if (err)
2541 goto err_clk_disable;
2542
2543 /* Turn off the main clock, WOL clock is handled separately */
2544 if (!IS_ERR(priv->clk))
2545 clk_disable_unprepare(priv->clk);
2546
2547 return err;
2548
2549err_clk_disable:
2550 if (!IS_ERR(priv->clk))
2551 clk_disable_unprepare(priv->clk);
2552err:
2553 free_netdev(dev);
2554 return err;
2555}
2556
/* Platform remove: unregister the netdev, tear down MII and free the
 * net_device.  Order matters: the interface must be unregistered before
 * its memory is released.
 */
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}
2568
2569
/* Platform driver glue; matched against the device tree via bcmgenet_match */
static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.owner	= THIS_MODULE,
		.of_match_table = bcmgenet_match,
	},
};
module_platform_driver(bcmgenet_driver);
2580
2581MODULE_AUTHOR("Broadcom Corporation");
2582MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
2583MODULE_ALIAS("platform:bcmgenet");
2584MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 000000000000..0f117105fed1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,628 @@
1/*
2 * Copyright (c) 2014 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 *
18*/
19#ifndef __BCMGENET_H__
20#define __BCMGENET_H__
21
22#include <linux/skbuff.h>
23#include <linux/netdevice.h>
24#include <linux/spinlock.h>
25#include <linux/clk.h>
26#include <linux/mii.h>
27#include <linux/if_vlan.h>
28#include <linux/phy.h>
29
30/* total number of Buffer Descriptors, same for Rx/Tx */
31#define TOTAL_DESC 256
32
33/* which ring is descriptor based */
34#define DESC_INDEX 16
35
36/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
37 * 1536 is multiple of 256 bytes
38 */
39#define ENET_BRCM_TAG_LEN 6
40#define ENET_PAD 8
41#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
42 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
43#define DMA_MAX_BURST_LENGTH 0x10
44
45/* misc. configuration */
46#define CLEAR_ALL_HFB 0xFF
47#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
48#define DMA_FC_THRESH_LO 5
49
/* 64B receive/transmit status block.
 * Layout of the 64-byte status block that can be prepended to each frame
 * (RBUF_64B_EN); field offsets must match the hardware, hence the unused
 * padding words.
 */
struct status_64 {
	u32	length_status;		/* length and peripheral status */
	u32	ext_status;		/* Extended status */
	u32	rx_csum;		/* partial rx checksum */
	u32	unused1[9];		/* unused */
	u32	tx_csum_info;		/* Tx checksum info. */
	u32	unused2[3];		/* unused */
};
59
60/* Rx status bits */
61#define STATUS_RX_EXT_MASK 0x1FFFFF
62#define STATUS_RX_CSUM_MASK 0xFFFF
63#define STATUS_RX_CSUM_OK 0x10000
64#define STATUS_RX_CSUM_FR 0x20000
65#define STATUS_RX_PROTO_TCP 0
66#define STATUS_RX_PROTO_UDP 1
67#define STATUS_RX_PROTO_ICMP 2
68#define STATUS_RX_PROTO_OTHER 3
69#define STATUS_RX_PROTO_MASK 3
70#define STATUS_RX_PROTO_SHIFT 18
71#define STATUS_FILTER_INDEX_MASK 0xFFFF
72/* Tx status bits */
73#define STATUS_TX_CSUM_START_MASK 0X7FFF
74#define STATUS_TX_CSUM_START_SHIFT 16
75#define STATUS_TX_CSUM_PROTO_UDP 0x8000
76#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
77#define STATUS_TX_CSUM_LV 0x80000000
78
79/* DMA Descriptor */
80#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
81#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
82#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
83
/* Rx/Tx common counter group: per-size-bin packet counters shared by the
 * RX and TX MIB blocks.  All fields are read-only hardware counters.
 */
struct bcmgenet_pkt_counters {
	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */
	u32	cnt_127;	/* RO Rx/Tx 65-127 bytes packet */
	u32	cnt_255;	/* RO Rx/Tx 128-255 bytes packet */
	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */
	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */
	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */
	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */
	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet */
	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet */
	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet */
};
97
/* RSV, Receive Status Vector: read-only RX MIB counters, laid out to match
 * the hardware register block starting at offset 0x428.
 */
struct bcmgenet_rx_counters {
	struct  bcmgenet_pkt_counters pkt_cnt;
	u32	pkt;		/* RO (0x428) Received pkt count */
	u32	bytes;		/* RO Received byte count */
	u32	mca;		/* RO # of Received multicast pkt */
	u32	bca;		/* RO # of Received broadcast pkt */
	u32	fcs;		/* RO # of Received FCS error */
	u32	cf;		/* RO # of Received control frame pkt */
	u32	pf;		/* RO # of Received pause frame pkt */
	u32	uo;		/* RO # of unknown op code pkt */
	u32	aln;		/* RO # of alignment error count */
	u32	flr;		/* RO # of frame length out of range count */
	u32	cde;		/* RO # of code error pkt */
	u32	fcr;		/* RO # of carrier sense error pkt */
	u32	ovr;		/* RO # of oversize pkt */
	u32	jbr;		/* RO # of jabber count */
	u32	mtue;		/* RO # of MTU error pkt */
	u32	pok;		/* RO # of Received good pkt */
	u32	uc;		/* RO # of unicast pkt */
	u32	ppp;		/* RO # of PPP pkt */
	u32	rcrc;		/* RO (0x470) # of CRC match pkt */
};
121
/* TSV, Transmit Status Vector: read-only TX MIB counters, laid out to match
 * the hardware register block starting at offset 0x4a8.
 */
struct bcmgenet_tx_counters {
	struct bcmgenet_pkt_counters pkt_cnt;
	u32	pkts;		/* RO (0x4a8) Transmitted pkt count */
	u32	mca;		/* RO # of xmited multicast pkt */
	u32	bca;		/* RO # of xmited broadcast pkt */
	u32	pf;		/* RO # of xmited pause frame count */
	u32	cf;		/* RO # of xmited control frame count */
	u32	fcs;		/* RO # of xmited FCS error count */
	u32	ovr;		/* RO # of xmited oversize pkt */
	u32	drf;		/* RO # of xmited deferral pkt */
	u32	edf;		/* RO # of xmited excessive deferral pkt */
	u32	scl;		/* RO # of xmited single collision pkt */
	u32	mcl;		/* RO # of xmited multiple collision pkt */
	u32	lcl;		/* RO # of xmited late collision pkt */
	u32	ecl;		/* RO # of xmited excessive collision pkt */
	u32	frg;		/* RO # of xmited fragments pkt */
	u32	ncl;		/* RO # of xmited total collision count */
	u32	jbr;		/* RO # of xmited jabber count */
	u32	bytes;		/* RO # of xmited byte count */
	u32	pok;		/* RO # of xmited good pkt */
	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */
};
145
/* Aggregate MIB snapshot: full RX and TX status vectors plus the runt and
 * buffer-error counters that live outside the two vectors.
 */
struct bcmgenet_mib_counters {
	struct bcmgenet_rx_counters rx;
	struct bcmgenet_tx_counters tx;
	u32	rx_runt_cnt;		/* runt frames received */
	u32	rx_runt_fcs;		/* runts with FCS errors */
	u32	rx_runt_fcs_align;	/* runts with FCS/alignment errors */
	u32	rx_runt_bytes;		/* bytes in runt frames */
	u32	rbuf_ovflow_cnt;	/* RX buffer overflow count */
	u32	rbuf_err_cnt;		/* RX buffer error count */
	u32	mdf_err_cnt;		/* MAC DA filter error count */
};
157
158#define UMAC_HD_BKP_CTRL 0x004
159#define HD_FC_EN (1 << 0)
160#define HD_FC_BKOFF_OK (1 << 1)
161#define IPG_CONFIG_RX_SHIFT 2
162#define IPG_CONFIG_RX_MASK 0x1F
163
164#define UMAC_CMD 0x008
165#define CMD_TX_EN (1 << 0)
166#define CMD_RX_EN (1 << 1)
167#define UMAC_SPEED_10 0
168#define UMAC_SPEED_100 1
169#define UMAC_SPEED_1000 2
170#define UMAC_SPEED_2500 3
171#define CMD_SPEED_SHIFT 2
172#define CMD_SPEED_MASK 3
173#define CMD_PROMISC (1 << 4)
174#define CMD_PAD_EN (1 << 5)
175#define CMD_CRC_FWD (1 << 6)
176#define CMD_PAUSE_FWD (1 << 7)
177#define CMD_RX_PAUSE_IGNORE (1 << 8)
178#define CMD_TX_ADDR_INS (1 << 9)
179#define CMD_HD_EN (1 << 10)
180#define CMD_SW_RESET (1 << 13)
181#define CMD_LCL_LOOP_EN (1 << 15)
182#define CMD_AUTO_CONFIG (1 << 22)
183#define CMD_CNTL_FRM_EN (1 << 23)
184#define CMD_NO_LEN_CHK (1 << 24)
185#define CMD_RMT_LOOP_EN (1 << 25)
186#define CMD_PRBL_EN (1 << 27)
187#define CMD_TX_PAUSE_IGNORE (1 << 28)
188#define CMD_TX_RX_EN (1 << 29)
189#define CMD_RUNT_FILTER_DIS (1 << 30)
190
191#define UMAC_MAC0 0x00C
192#define UMAC_MAC1 0x010
193#define UMAC_MAX_FRAME_LEN 0x014
194
195#define UMAC_TX_FLUSH 0x334
196
197#define UMAC_MIB_START 0x400
198
199#define UMAC_MDIO_CMD 0x614
200#define MDIO_START_BUSY (1 << 29)
201#define MDIO_READ_FAIL (1 << 28)
202#define MDIO_RD (2 << 26)
203#define MDIO_WR (1 << 26)
204#define MDIO_PMD_SHIFT 21
205#define MDIO_PMD_MASK 0x1F
206#define MDIO_REG_SHIFT 16
207#define MDIO_REG_MASK 0x1F
208
209#define UMAC_RBUF_OVFL_CNT 0x61C
210
211#define UMAC_MPD_CTRL 0x620
212#define MPD_EN (1 << 0)
213#define MPD_PW_EN (1 << 27)
214#define MPD_MSEQ_LEN_SHIFT 16
215#define MPD_MSEQ_LEN_MASK 0xFF
216
217#define UMAC_MPD_PW_MS 0x624
218#define UMAC_MPD_PW_LS 0x628
219#define UMAC_RBUF_ERR_CNT 0x634
220#define UMAC_MDF_ERR_CNT 0x638
221#define UMAC_MDF_CTRL 0x650
222#define UMAC_MDF_ADDR 0x654
223#define UMAC_MIB_CTRL 0x580
224#define MIB_RESET_RX (1 << 0)
225#define MIB_RESET_RUNT (1 << 1)
226#define MIB_RESET_TX (1 << 2)
227
228#define RBUF_CTRL 0x00
229#define RBUF_64B_EN (1 << 0)
230#define RBUF_ALIGN_2B (1 << 1)
231#define RBUF_BAD_DIS (1 << 2)
232
233#define RBUF_STATUS 0x0C
234#define RBUF_STATUS_WOL (1 << 0)
235#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
236#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
237
238#define RBUF_CHK_CTRL 0x14
239#define RBUF_RXCHK_EN (1 << 0)
240#define RBUF_SKIP_FCS (1 << 4)
241
242#define RBUF_TBUF_SIZE_CTRL 0xb4
243
244#define RBUF_HFB_CTRL_V1 0x38
245#define RBUF_HFB_FILTER_EN_SHIFT 16
246#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
247#define RBUF_HFB_EN (1 << 0)
248#define RBUF_HFB_256B (1 << 1)
249#define RBUF_ACPI_EN (1 << 2)
250
251#define RBUF_HFB_LEN_V1 0x3C
252#define RBUF_FLTR_LEN_MASK 0xFF
253#define RBUF_FLTR_LEN_SHIFT 8
254
255#define TBUF_CTRL 0x00
256#define TBUF_BP_MC 0x0C
257
258#define TBUF_CTRL_V1 0x80
259#define TBUF_BP_MC_V1 0xA0
260
261#define HFB_CTRL 0x00
262#define HFB_FLT_ENABLE_V3PLUS 0x04
263#define HFB_FLT_LEN_V2 0x04
264#define HFB_FLT_LEN_V3PLUS 0x1C
265
266/* uniMac intrl2 registers */
267#define INTRL2_CPU_STAT 0x00
268#define INTRL2_CPU_SET 0x04
269#define INTRL2_CPU_CLEAR 0x08
270#define INTRL2_CPU_MASK_STATUS 0x0C
271#define INTRL2_CPU_MASK_SET 0x10
272#define INTRL2_CPU_MASK_CLEAR 0x14
273
274/* INTRL2 instance 0 definitions */
275#define UMAC_IRQ_SCB (1 << 0)
276#define UMAC_IRQ_EPHY (1 << 1)
277#define UMAC_IRQ_PHY_DET_R (1 << 2)
278#define UMAC_IRQ_PHY_DET_F (1 << 3)
279#define UMAC_IRQ_LINK_UP (1 << 4)
280#define UMAC_IRQ_LINK_DOWN (1 << 5)
281#define UMAC_IRQ_UMAC (1 << 6)
282#define UMAC_IRQ_UMAC_TSV (1 << 7)
283#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
284#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
285#define UMAC_IRQ_HFB_SM (1 << 10)
286#define UMAC_IRQ_HFB_MM (1 << 11)
287#define UMAC_IRQ_MPD_R (1 << 12)
288#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
289#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
290#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
291#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
292#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
293#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
294/* Only valid for GENETv3+ */
295#define UMAC_IRQ_MDIO_DONE (1 << 23)
296#define UMAC_IRQ_MDIO_ERROR (1 << 24)
297
298/* Register block offsets */
299#define GENET_SYS_OFF 0x0000
300#define GENET_GR_BRIDGE_OFF 0x0040
301#define GENET_EXT_OFF 0x0080
302#define GENET_INTRL2_0_OFF 0x0200
303#define GENET_INTRL2_1_OFF 0x0240
304#define GENET_RBUF_OFF 0x0300
305#define GENET_UMAC_OFF 0x0800
306
307/* SYS block offsets and register definitions */
308#define SYS_REV_CTRL 0x00
309#define SYS_PORT_CTRL 0x04
310#define PORT_MODE_INT_EPHY 0
311#define PORT_MODE_INT_GPHY 1
312#define PORT_MODE_EXT_EPHY 2
313#define PORT_MODE_EXT_GPHY 3
314#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
315#define PORT_MODE_EXT_RVMII_50 4
316#define LED_ACT_SOURCE_MAC (1 << 9)
317
318#define SYS_RBUF_FLUSH_CTRL 0x08
319#define SYS_TBUF_FLUSH_CTRL 0x0C
320#define RBUF_FLUSH_CTRL_V1 0x04
321
322/* Ext block register offsets and definitions */
323#define EXT_EXT_PWR_MGMT 0x00
324#define EXT_PWR_DOWN_BIAS (1 << 0)
325#define EXT_PWR_DOWN_DLL (1 << 1)
326#define EXT_PWR_DOWN_PHY (1 << 2)
327#define EXT_PWR_DN_EN_LD (1 << 3)
328#define EXT_ENERGY_DET (1 << 4)
329#define EXT_IDDQ_FROM_PHY (1 << 5)
330#define EXT_PHY_RESET (1 << 8)
331#define EXT_ENERGY_DET_MASK (1 << 12)
332
333#define EXT_RGMII_OOB_CTRL 0x0C
334#define RGMII_MODE_EN (1 << 0)
335#define RGMII_LINK (1 << 4)
336#define OOB_DISABLE (1 << 5)
337#define ID_MODE_DIS (1 << 16)
338
339#define EXT_GPHY_CTRL 0x1C
340#define EXT_CFG_IDDQ_BIAS (1 << 0)
341#define EXT_CFG_PWR_DOWN (1 << 1)
342#define EXT_GPHY_RESET (1 << 5)
343
344/* DMA rings size */
345#define DMA_RING_SIZE (0x40)
346#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
347
348/* DMA registers common definitions */
349#define DMA_RW_POINTER_MASK 0x1FF
350#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
351#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
352#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
353#define DMA_BUFFER_DONE_CNT_SHIFT 16
354#define DMA_P_INDEX_MASK 0xFFFF
355#define DMA_C_INDEX_MASK 0xFFFF
356
357/* DMA ring size register */
358#define DMA_RING_SIZE_MASK 0xFFFF
359#define DMA_RING_SIZE_SHIFT 16
360#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
361
362/* DMA interrupt threshold register */
363#define DMA_INTR_THRESHOLD_MASK 0x00FF
364
365/* DMA XON/XOFF register */
366#define DMA_XON_THREHOLD_MASK 0xFFFF
367#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
368#define DMA_XOFF_THRESHOLD_SHIFT 16
369
370/* DMA flow period register */
371#define DMA_FLOW_PERIOD_MASK 0xFFFF
372#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
373#define DMA_MAX_PKT_SIZE_SHIFT 16
374
375
376/* DMA control register */
377#define DMA_EN (1 << 0)
378#define DMA_RING_BUF_EN_SHIFT 0x01
379#define DMA_RING_BUF_EN_MASK 0xFFFF
380#define DMA_TSB_SWAP_EN (1 << 20)
381
382/* DMA status register */
383#define DMA_DISABLED (1 << 0)
384#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
385
386/* DMA SCB burst size register */
387#define DMA_SCB_BURST_SIZE_MASK 0x1F
388
389/* DMA activity vector register */
390#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
391
392/* DMA backpressure mask register */
393#define DMA_BACKPRESSURE_MASK 0x1FFFF
394#define DMA_PFC_ENABLE (1 << 31)
395
396/* DMA backpressure status register */
397#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
398
399/* DMA override register */
400#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
401#define DMA_REGISTER_MODE (1 << 1)
402
403/* DMA timeout register */
404#define DMA_TIMEOUT_MASK 0xFFFF
405#define DMA_TIMEOUT_VAL 5000 /* micro seconds */
406
407/* TDMA rate limiting control register */
408#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
409
410/* TDMA arbitration control register */
411#define DMA_ARBITER_MODE_MASK 0x03
412#define DMA_RING_BUF_PRIORITY_MASK 0x1F
413#define DMA_RING_BUF_PRIORITY_SHIFT 5
414#define DMA_RATE_ADJ_MASK 0xFF
415
416/* Tx/Rx Dma Descriptor common bits*/
417#define DMA_BUFLENGTH_MASK 0x0fff
418#define DMA_BUFLENGTH_SHIFT 16
419#define DMA_OWN 0x8000
420#define DMA_EOP 0x4000
421#define DMA_SOP 0x2000
422#define DMA_WRAP 0x1000
423/* Tx specific Dma descriptor bits */
424#define DMA_TX_UNDERRUN 0x0200
425#define DMA_TX_APPEND_CRC 0x0040
426#define DMA_TX_OW_CRC 0x0020
427#define DMA_TX_DO_CSUM 0x0010
428#define DMA_TX_QTAG_SHIFT 7
429
430/* Rx Specific Dma descriptor bits */
431#define DMA_RX_CHK_V3PLUS 0x8000
432#define DMA_RX_CHK_V12 0x1000
433#define DMA_RX_BRDCAST 0x0040
434#define DMA_RX_MULT 0x0020
435#define DMA_RX_LG 0x0010
436#define DMA_RX_NO 0x0008
437#define DMA_RX_RXER 0x0004
438#define DMA_RX_CRC_ERROR 0x0002
439#define DMA_RX_OV 0x0001
440#define DMA_RX_FI_MASK 0x001F
441#define DMA_RX_FI_SHIFT 0x0007
442#define DMA_DESC_ALLOC_MASK 0x00FF
443
444#define DMA_ARBITER_RR 0x00
445#define DMA_ARBITER_WRR 0x01
446#define DMA_ARBITER_SP 0x02
447
/* Per-descriptor control block: tracks the SKB attached to a buffer
 * descriptor and the DMA-unmap bookkeeping needed to release it.
 */
448struct enet_cb {
449 struct sk_buff *skb; /* SKB currently mapped to this descriptor */
450 void __iomem *bd_addr; /* register-space address of the descriptor */
451 DEFINE_DMA_UNMAP_ADDR(dma_addr); /* DMA address for dma_unmap_*() */
452 DEFINE_DMA_UNMAP_LEN(dma_len); /* mapped length for dma_unmap_*() */
453};
454
455/* power management mode */
/* Target states for GENET power-management transitions. */
456enum bcmgenet_power_mode {
457 GENET_POWER_CABLE_SENSE = 0,
458 GENET_POWER_PASSIVE,
459};
460
461struct bcmgenet_priv;
462
463/* We support both runtime GENET detection and compile-time
464 * to optimize code-paths for a given hardware
465 */
/* Hardware generation of the GENET block; tested through the
 * GENET_IS_V1()..GENET_IS_V4() helpers defined below.
 */
466enum bcmgenet_version {
467 GENET_V1 = 1,
468 GENET_V2,
469 GENET_V3,
470 GENET_V4
471};
472
473#define GENET_IS_V1(p) ((p)->version == GENET_V1)
474#define GENET_IS_V2(p) ((p)->version == GENET_V2)
475#define GENET_IS_V3(p) ((p)->version == GENET_V3)
476#define GENET_IS_V4(p) ((p)->version == GENET_V4)
477
478/* Hardware flags */
479#define GENET_HAS_40BITS (1 << 0)
480#define GENET_HAS_EXT (1 << 1)
481#define GENET_HAS_MDIO_INTR (1 << 2)
482
483/* BCMGENET hardware parameters, keep this structure nicely aligned
484 * since it is going to be used in hot paths
485 */
486struct bcmgenet_hw_params {
487 u8 tx_queues; /* number of TX queues */
488 u8 rx_queues; /* number of RX queues */
489 u8 bds_cnt; /* number of buffer descriptors */
490 u8 bp_in_en_shift;
491 u32 bp_in_mask;
492 u8 hfb_filter_cnt; /* hardware filter block filter count */
493 u8 qtag_mask;
494 u16 tbuf_offset; /* TBUF register block offset */
495 u32 hfb_offset; /* HFB block offset (see GENET_IO_MACRO(hfb, ...)) */
496 u32 hfb_reg_offset; /* HFB control/len register block offset */
497 u32 rdma_offset; /* RX DMA register block offset */
498 u32 tdma_offset; /* TX DMA register block offset */
499 u32 words_per_bd; /* words per buffer descriptor */
500 u32 flags; /* bitmask of GENET_HAS_* capability flags */
501};
502
/* Software state for one transmit ring: SW copies of the hardware
 * producer/consumer pointers plus per-ring interrupt enable/disable hooks.
 */
503struct bcmgenet_tx_ring {
504 spinlock_t lock; /* ring lock */
505 unsigned int index; /* ring index */
506 unsigned int queue; /* queue index */
507 struct enet_cb *cbs; /* tx ring buffer control block*/
508 unsigned int size; /* size of each tx ring */
509 unsigned int c_index; /* last consumer index of each ring*/
510 unsigned int free_bds; /* # of free bds for each ring */
511 unsigned int write_ptr; /* Tx ring write pointer SW copy */
512 unsigned int prod_index; /* Tx ring producer index SW copy */
513 unsigned int cb_ptr; /* Tx ring initial CB ptr */
514 unsigned int end_ptr; /* Tx ring end CB ptr */
515 void (*int_enable)(struct bcmgenet_priv *priv,
516 struct bcmgenet_tx_ring *);
517 void (*int_disable)(struct bcmgenet_priv *priv,
518 struct bcmgenet_tx_ring *);
519};
520
521/* device context */
/* Main per-device private state, reached via netdev_priv(). */
522struct bcmgenet_priv {
523 void __iomem *base; /* mapped register base (see GENET_IO_MACRO) */
524 enum bcmgenet_version version;
525 struct net_device *dev;
526 u32 int0_mask;
527 u32 int1_mask;
528
529 /* NAPI for descriptor based rx */
530 struct napi_struct napi ____cacheline_aligned;
531
532 /* transmit variables */
533 void __iomem *tx_bds;
534 struct enet_cb *tx_cbs;
535 unsigned int num_tx_bds;
536
537 struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
538
539 /* receive variables */
540 void __iomem *rx_bds;
541 void __iomem *rx_bd_assign_ptr;
542 int rx_bd_assign_index;
543 struct enet_cb *rx_cbs;
544 unsigned int num_rx_bds;
545 unsigned int rx_buf_len;
546 unsigned int rx_read_ptr;
547 unsigned int rx_c_index;
548
549 /* other misc variables */
550 struct bcmgenet_hw_params *hw_params;
551
552 /* MDIO bus variables */
553 wait_queue_head_t wq; /* MDIO read/write completions sleep here */
554 struct phy_device *phydev;
555 struct device_node *phy_dn; /* DT "phy-handle" node */
556 struct mii_bus *mii_bus;
557
558 /* PHY device variables */
559 int old_duplex; /* cached PHY state for link-change detection */
560 int old_link;
561 int old_pause;
562 phy_interface_t phy_interface;
563 int phy_addr;
564 int ext_phy;
565
566 /* Interrupt variables */
567 struct work_struct bcmgenet_irq_work;
568 int irq0;
569 int irq1;
570 unsigned int irq0_stat;
571 unsigned int irq1_stat;
572
573 /* HW descriptors/checksum variables */
574 bool desc_64b_en;
575 bool desc_rxchk_en;
576 bool crc_fwd_en;
577
578 unsigned int dma_rx_chk_bit;
579
580 u32 msg_enable;
581
582 struct clk *clk;
583 struct platform_device *pdev;
584
585 /* WOL */
586 unsigned long wol_enabled;
587 struct clk *clk_wol;
588 u32 wolopts;
589
590 struct bcmgenet_mib_counters mib;
591};
592
/* Generate bcmgenet_<name>_readl()/bcmgenet_<name>_writel() accessors
 * for the register block located at 'offset' from the mapped base.
 * NOTE(review): these use __raw_readl/__raw_writel, i.e. native-endian
 * accesses without ordering barriers — confirm that is intended for
 * every block instantiated below.
 */
593#define GENET_IO_MACRO(name, offset) \
594static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
595 u32 off) \
596{ \
597 return __raw_readl(priv->base + offset + off); \
598} \
599static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
600 u32 val, u32 off) \
601{ \
602 __raw_writel(val, priv->base + offset + off); \
603}
604
605GENET_IO_MACRO(ext, GENET_EXT_OFF);
606GENET_IO_MACRO(umac, GENET_UMAC_OFF);
607GENET_IO_MACRO(sys, GENET_SYS_OFF);
608
609/* interrupt l2 registers accessors */
610GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
611GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
612
613/* HFB register accessors */
614GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
615
616/* GENET v2+ HFB control and filter len helpers */
617GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
618
619/* RBUF register accessors */
620GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
621
622/* MDIO routines */
623int bcmgenet_mii_init(struct net_device *dev);
624int bcmgenet_mii_config(struct net_device *dev);
625void bcmgenet_mii_exit(struct net_device *dev);
626void bcmgenet_mii_reset(struct net_device *dev);
627
628#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 000000000000..4608673beaff
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,464 @@
1/*
2 * Broadcom GENET MDIO routines
3 *
4 * Copyright (c) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20
21#include <linux/types.h>
22#include <linux/delay.h>
23#include <linux/wait.h>
24#include <linux/mii.h>
25#include <linux/ethtool.h>
26#include <linux/bitops.h>
27#include <linux/netdevice.h>
28#include <linux/platform_device.h>
29#include <linux/phy.h>
30#include <linux/phy_fixed.h>
31#include <linux/brcmphy.h>
32#include <linux/of.h>
33#include <linux/of_net.h>
34#include <linux/of_mdio.h>
35
36#include "bcmgenet.h"
37
38/* read a value from the MII */
39static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
40{
41 int ret;
42 struct net_device *dev = bus->priv;
43 struct bcmgenet_priv *priv = netdev_priv(dev);
44 u32 reg;
45
46 bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
47 (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
48 /* Start MDIO transaction*/
49 reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
50 reg |= MDIO_START_BUSY;
51 bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
52 wait_event_timeout(priv->wq,
53 !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
54 & MDIO_START_BUSY),
55 HZ / 100);
56 ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
57
58 if (ret & MDIO_READ_FAIL)
59 return -EIO;
60
61 return ret & 0xffff;
62}
63
64/* write a value to the MII */
65static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
66 int location, u16 val)
67{
68 struct net_device *dev = bus->priv;
69 struct bcmgenet_priv *priv = netdev_priv(dev);
70 u32 reg;
71
72 bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
73 (location << MDIO_REG_SHIFT) | (0xffff & val)),
74 UMAC_MDIO_CMD);
75 reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
76 reg |= MDIO_START_BUSY;
77 bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
78 wait_event_timeout(priv->wq,
79 !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
80 MDIO_START_BUSY),
81 HZ / 100);
82
83 return 0;
84}
85
86/* setup netdev link state when PHY link status change and
87 * update UMAC and RGMII block when link up
88 */
89static void bcmgenet_mii_setup(struct net_device *dev)
90{
91 struct bcmgenet_priv *priv = netdev_priv(dev);
92 struct phy_device *phydev = priv->phydev;
93 u32 reg, cmd_bits = 0;
94 unsigned int status_changed = 0;
95
96 if (priv->old_link != phydev->link) {
97 status_changed = 1;
98 priv->old_link = phydev->link;
99 }
100
101 if (phydev->link) {
102 /* program UMAC and RGMII block based on established link
103 * speed, pause, and duplex.
104 * the speed set in umac->cmd tell RGMII block which clock
105 * 25MHz(100Mbps)/125MHz(1Gbps) to use for transmit.
106 * receive clock is provided by PHY.
107 */
108 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
109 reg &= ~OOB_DISABLE;
110 reg |= RGMII_LINK;
111 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
112
113 /* speed */
114 if (phydev->speed == SPEED_1000)
115 cmd_bits = UMAC_SPEED_1000;
116 else if (phydev->speed == SPEED_100)
117 cmd_bits = UMAC_SPEED_100;
118 else
119 cmd_bits = UMAC_SPEED_10;
120 cmd_bits <<= CMD_SPEED_SHIFT;
121
122 if (priv->old_duplex != phydev->duplex) {
123 status_changed = 1;
124 priv->old_duplex = phydev->duplex;
125 }
126
127 /* duplex */
128 if (phydev->duplex != DUPLEX_FULL)
129 cmd_bits |= CMD_HD_EN;
130
131 if (priv->old_pause != phydev->pause) {
132 status_changed = 1;
133 priv->old_pause = phydev->pause;
134 }
135
136 /* pause capability */
137 if (!phydev->pause)
138 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
139
140 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
141 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
142 CMD_HD_EN |
143 CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
144 reg |= cmd_bits;
145 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
146 }
147
148 if (status_changed)
149 phy_print_status(phydev);
150}
151
152void bcmgenet_mii_reset(struct net_device *dev)
153{
154 struct bcmgenet_priv *priv = netdev_priv(dev);
155
156 if (priv->phydev) {
157 phy_init_hw(priv->phydev);
158 phy_start_aneg(priv->phydev);
159 }
160}
161
162static void bcmgenet_ephy_power_up(struct net_device *dev)
163{
164 struct bcmgenet_priv *priv = netdev_priv(dev);
165 u32 reg = 0;
166
167 /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
168 if (!GENET_IS_V4(priv))
169 return;
170
171 reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
172 reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
173 reg |= EXT_GPHY_RESET;
174 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
175 mdelay(2);
176
177 reg &= ~EXT_GPHY_RESET;
178 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
179 udelay(20);
180}
181
182static void bcmgenet_internal_phy_setup(struct net_device *dev)
183{
184 struct bcmgenet_priv *priv = netdev_priv(dev);
185 u32 reg;
186
187 /* Power up EPHY */
188 bcmgenet_ephy_power_up(dev);
189 /* enable APD */
190 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
191 reg |= EXT_PWR_DN_EN_LD;
192 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
193 bcmgenet_mii_reset(dev);
194}
195
196static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
197{
198 u32 reg;
199
200 /* Speed settings are set in bcmgenet_mii_setup() */
201 reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
202 reg |= LED_ACT_SOURCE_MAC;
203 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
204}
205
206int bcmgenet_mii_config(struct net_device *dev)
207{
208 struct bcmgenet_priv *priv = netdev_priv(dev);
209 struct phy_device *phydev = priv->phydev;
210 struct device *kdev = &priv->pdev->dev;
211 const char *phy_name = NULL;
212 u32 id_mode_dis = 0;
213 u32 port_ctrl;
214 u32 reg;
215
216 priv->ext_phy = !phy_is_internal(priv->phydev) &&
217 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
218
219 if (phy_is_internal(priv->phydev))
220 priv->phy_interface = PHY_INTERFACE_MODE_NA;
221
222 switch (priv->phy_interface) {
223 case PHY_INTERFACE_MODE_NA:
224 case PHY_INTERFACE_MODE_MOCA:
225 /* Irrespective of the actually configured PHY speed (100 or
226 * 1000) GENETv4 only has an internal GPHY so we will just end
227 * up masking the Gigabit features from what we support, not
228 * switching to the EPHY
229 */
230 if (GENET_IS_V4(priv))
231 port_ctrl = PORT_MODE_INT_GPHY;
232 else
233 port_ctrl = PORT_MODE_INT_EPHY;
234
235 bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
236
237 if (phy_is_internal(priv->phydev)) {
238 phy_name = "internal PHY";
239 bcmgenet_internal_phy_setup(dev);
240 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
241 phy_name = "MoCA";
242 bcmgenet_moca_phy_setup(priv);
243 }
244 break;
245
246 case PHY_INTERFACE_MODE_MII:
247 phy_name = "external MII";
248 phydev->supported &= PHY_BASIC_FEATURES;
249 bcmgenet_sys_writel(priv,
250 PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
251 break;
252
253 case PHY_INTERFACE_MODE_REVMII:
254 phy_name = "external RvMII";
255 /* of_mdiobus_register took care of reading the 'max-speed'
256 * PHY property for us, effectively limiting the PHY supported
257 * capabilities, use that knowledge to also configure the
258 * Reverse MII interface correctly.
259 */
260 if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
261 PHY_BASIC_FEATURES)
262 port_ctrl = PORT_MODE_EXT_RVMII_25;
263 else
264 port_ctrl = PORT_MODE_EXT_RVMII_50;
265 bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
266 break;
267
268 case PHY_INTERFACE_MODE_RGMII:
269 /* RGMII_NO_ID: TXC transitions at the same time as TXD
270 * (requires PCB or receiver-side delay)
271 * RGMII: Add 2ns delay on TXC (90 degree shift)
272 *
273 * ID is implicitly disabled for 100Mbps (RG)MII operation.
274 */
275 id_mode_dis = BIT(16);
276 /* fall through */
277 case PHY_INTERFACE_MODE_RGMII_TXID:
278 if (id_mode_dis)
279 phy_name = "external RGMII (no delay)";
280 else
281 phy_name = "external RGMII (TX delay)";
282 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
283 reg |= RGMII_MODE_EN | id_mode_dis;
284 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
285 bcmgenet_sys_writel(priv,
286 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
287 break;
288 default:
289 dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
290 return -EINVAL;
291 }
292
293 dev_info(kdev, "configuring instance for %s\n", phy_name);
294
295 return 0;
296}
297
298static int bcmgenet_mii_probe(struct net_device *dev)
299{
300 struct bcmgenet_priv *priv = netdev_priv(dev);
301 struct phy_device *phydev;
302 unsigned int phy_flags;
303 int ret;
304
305 if (priv->phydev) {
306 pr_info("PHY already attached\n");
307 return 0;
308 }
309
310 if (priv->phy_dn)
311 phydev = of_phy_connect(dev, priv->phy_dn,
312 bcmgenet_mii_setup, 0,
313 priv->phy_interface);
314 else
315 phydev = of_phy_connect_fixed_link(dev,
316 bcmgenet_mii_setup,
317 priv->phy_interface);
318
319 if (!phydev) {
320 pr_err("could not attach to PHY\n");
321 return -ENODEV;
322 }
323
324 priv->old_link = -1;
325 priv->old_duplex = -1;
326 priv->old_pause = -1;
327 priv->phydev = phydev;
328
329 /* Configure port multiplexer based on what the probed PHY device since
330 * reading the 'max-speed' property determines the maximum supported
331 * PHY speed which is needed for bcmgenet_mii_config() to configure
332 * things appropriately.
333 */
334 ret = bcmgenet_mii_config(dev);
335 if (ret) {
336 phy_disconnect(priv->phydev);
337 return ret;
338 }
339
340 phy_flags = PHY_BRCM_100MBPS_WAR;
341
342 /* workarounds are only needed for 100Mpbs PHYs, and
343 * never on GENET V1 hardware
344 */
345 if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
346 phy_flags = 0;
347
348 phydev->dev_flags |= phy_flags;
349 phydev->advertising = phydev->supported;
350
351 /* The internal PHY has its link interrupts routed to the
352 * Ethernet MAC ISRs
353 */
354 if (phy_is_internal(priv->phydev))
355 priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
356 else
357 priv->mii_bus->irq[phydev->addr] = PHY_POLL;
358
359 pr_info("attached PHY at address %d [%s]\n",
360 phydev->addr, phydev->drv->name);
361
362 return 0;
363}
364
365static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
366{
367 struct mii_bus *bus;
368
369 if (priv->mii_bus)
370 return 0;
371
372 priv->mii_bus = mdiobus_alloc();
373 if (!priv->mii_bus) {
374 pr_err("failed to allocate\n");
375 return -ENOMEM;
376 }
377
378 bus = priv->mii_bus;
379 bus->priv = priv->dev;
380 bus->name = "bcmgenet MII bus";
381 bus->parent = &priv->pdev->dev;
382 bus->read = bcmgenet_mii_read;
383 bus->write = bcmgenet_mii_write;
384 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
385 priv->pdev->name, priv->pdev->id);
386
387 bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
388 if (!bus->irq) {
389 mdiobus_free(priv->mii_bus);
390 return -ENOMEM;
391 }
392
393 return 0;
394}
395
396static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
397{
398 struct device_node *dn = priv->pdev->dev.of_node;
399 struct device *kdev = &priv->pdev->dev;
400 struct device_node *mdio_dn;
401 char *compat;
402 int ret;
403
404 compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
405 if (!compat)
406 return -ENOMEM;
407
408 mdio_dn = of_find_compatible_node(dn, NULL, compat);
409 kfree(compat);
410 if (!mdio_dn) {
411 dev_err(kdev, "unable to find MDIO bus node\n");
412 return -ENODEV;
413 }
414
415 ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
416 if (ret) {
417 dev_err(kdev, "failed to register MDIO bus\n");
418 return ret;
419 }
420
421 /* Fetch the PHY phandle */
422 priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
423
424 /* Get the link mode */
425 priv->phy_interface = of_get_phy_mode(dn);
426
427 return 0;
428}
429
430int bcmgenet_mii_init(struct net_device *dev)
431{
432 struct bcmgenet_priv *priv = netdev_priv(dev);
433 int ret;
434
435 ret = bcmgenet_mii_alloc(priv);
436 if (ret)
437 return ret;
438
439 ret = bcmgenet_mii_of_init(priv);
440 if (ret)
441 goto out_free;
442
443 ret = bcmgenet_mii_probe(dev);
444 if (ret)
445 goto out;
446
447 return 0;
448
449out:
450 mdiobus_unregister(priv->mii_bus);
451out_free:
452 kfree(priv->mii_bus->irq);
453 mdiobus_free(priv->mii_bus);
454 return ret;
455}
456
457void bcmgenet_mii_exit(struct net_device *dev)
458{
459 struct bcmgenet_priv *priv = netdev_priv(dev);
460
461 mdiobus_unregister(priv->mii_bus);
462 kfree(priv->mii_bus->irq);
463 mdiobus_free(priv->mii_bus);
464}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 70a225c8df5c..b9f7022f4e81 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1401,11 +1401,6 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1401 return ret; 1401 return ret;
1402} 1402}
1403 1403
1404static int tg3_mdio_reset(struct mii_bus *bp)
1405{
1406 return 0;
1407}
1408
1409static void tg3_mdio_config_5785(struct tg3 *tp) 1404static void tg3_mdio_config_5785(struct tg3 *tp)
1410{ 1405{
1411 u32 val; 1406 u32 val;
@@ -1542,7 +1537,6 @@ static int tg3_mdio_init(struct tg3 *tp)
1542 tp->mdio_bus->parent = &tp->pdev->dev; 1537 tp->mdio_bus->parent = &tp->pdev->dev;
1543 tp->mdio_bus->read = &tg3_mdio_read; 1538 tp->mdio_bus->read = &tg3_mdio_read;
1544 tp->mdio_bus->write = &tg3_mdio_write; 1539 tp->mdio_bus->write = &tg3_mdio_write;
1545 tp->mdio_bus->reset = &tg3_mdio_reset;
1546 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); 1540 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1547 tp->mdio_bus->irq = &tp->mdio_irq[0]; 1541 tp->mdio_bus->irq = &tp->mdio_irq[0];
1548 1542
@@ -6322,6 +6316,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
6322 .n_alarm = 0, 6316 .n_alarm = 0,
6323 .n_ext_ts = 0, 6317 .n_ext_ts = 0,
6324 .n_per_out = 1, 6318 .n_per_out = 1,
6319 .n_pins = 0,
6325 .pps = 0, 6320 .pps = 0,
6326 .adjfreq = tg3_ptp_adjfreq, 6321 .adjfreq = tg3_ptp_adjfreq,
6327 .adjtime = tg3_ptp_adjtime, 6322 .adjtime = tg3_ptp_adjtime,
@@ -6593,7 +6588,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
6593 pkts_compl++; 6588 pkts_compl++;
6594 bytes_compl += skb->len; 6589 bytes_compl += skb->len;
6595 6590
6596 dev_kfree_skb(skb); 6591 dev_kfree_skb_any(skb);
6597 6592
6598 if (unlikely(tx_bug)) { 6593 if (unlikely(tx_bug)) {
6599 tg3_tx_recover(tp); 6594 tg3_tx_recover(tp);
@@ -6924,7 +6919,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
6924 6919
6925 if (len > (tp->dev->mtu + ETH_HLEN) && 6920 if (len > (tp->dev->mtu + ETH_HLEN) &&
6926 skb->protocol != htons(ETH_P_8021Q)) { 6921 skb->protocol != htons(ETH_P_8021Q)) {
6927 dev_kfree_skb(skb); 6922 dev_kfree_skb_any(skb);
6928 goto drop_it_no_recycle; 6923 goto drop_it_no_recycle;
6929 } 6924 }
6930 6925
@@ -7807,7 +7802,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7807 PCI_DMA_TODEVICE); 7802 PCI_DMA_TODEVICE);
7808 /* Make sure the mapping succeeded */ 7803 /* Make sure the mapping succeeded */
7809 if (pci_dma_mapping_error(tp->pdev, new_addr)) { 7804 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7810 dev_kfree_skb(new_skb); 7805 dev_kfree_skb_any(new_skb);
7811 ret = -1; 7806 ret = -1;
7812 } else { 7807 } else {
7813 u32 save_entry = *entry; 7808 u32 save_entry = *entry;
@@ -7822,13 +7817,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7822 new_skb->len, base_flags, 7817 new_skb->len, base_flags,
7823 mss, vlan)) { 7818 mss, vlan)) {
7824 tg3_tx_skb_unmap(tnapi, save_entry, -1); 7819 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825 dev_kfree_skb(new_skb); 7820 dev_kfree_skb_any(new_skb);
7826 ret = -1; 7821 ret = -1;
7827 } 7822 }
7828 } 7823 }
7829 } 7824 }
7830 7825
7831 dev_kfree_skb(skb); 7826 dev_kfree_skb_any(skb);
7832 *pskb = new_skb; 7827 *pskb = new_skb;
7833 return ret; 7828 return ret;
7834} 7829}
@@ -7871,7 +7866,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7871 } while (segs); 7866 } while (segs);
7872 7867
7873tg3_tso_bug_end: 7868tg3_tso_bug_end:
7874 dev_kfree_skb(skb); 7869 dev_kfree_skb_any(skb);
7875 7870
7876 return NETDEV_TX_OK; 7871 return NETDEV_TX_OK;
7877} 7872}
@@ -7923,8 +7918,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7923 struct iphdr *iph; 7918 struct iphdr *iph;
7924 u32 tcp_opt_len, hdr_len; 7919 u32 tcp_opt_len, hdr_len;
7925 7920
7926 if (skb_header_cloned(skb) && 7921 if (skb_cow_head(skb, 0))
7927 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7928 goto drop; 7922 goto drop;
7929 7923
7930 iph = ip_hdr(skb); 7924 iph = ip_hdr(skb);
@@ -8093,7 +8087,7 @@ dma_error:
8093 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8087 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8094 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8088 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8095drop: 8089drop:
8096 dev_kfree_skb(skb); 8090 dev_kfree_skb_any(skb);
8097drop_nofree: 8091drop_nofree:
8098 tp->tx_dropped++; 8092 tp->tx_dropped++;
8099 return NETDEV_TX_OK; 8093 return NETDEV_TX_OK;
@@ -11361,12 +11355,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
11361 msix_ent[i].vector = 0; 11355 msix_ent[i].vector = 0;
11362 } 11356 }
11363 11357
11364 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); 11358 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11365 if (rc < 0) { 11359 if (rc < 0) {
11366 return false; 11360 return false;
11367 } else if (rc != 0) { 11361 } else if (rc < tp->irq_cnt) {
11368 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11369 return false;
11370 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11362 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11371 tp->irq_cnt, rc); 11363 tp->irq_cnt, rc);
11372 tp->irq_cnt = rc; 11364 tp->irq_cnt = rc;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 4ad1187e82fb..675550fe8ee9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2496,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2496{ 2496{
2497 int err; 2497 int err;
2498 2498
2499 if (skb_header_cloned(skb)) { 2499 err = skb_cow_head(skb, 0);
2500 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2500 if (err < 0) {
2501 if (err) { 2501 BNAD_UPDATE_CTR(bnad, tso_err);
2502 BNAD_UPDATE_CTR(bnad, tso_err); 2502 return err;
2503 return err;
2504 }
2505 } 2503 }
2506 2504
2507 /* 2505 /*
@@ -2669,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad)
2669 for (i = 0; i < bnad->msix_num; i++) 2667 for (i = 0; i < bnad->msix_num; i++)
2670 bnad->msix_table[i].entry = i; 2668 bnad->msix_table[i].entry = i;
2671 2669
2672 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); 2670 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2673 if (ret > 0) { 2671 1, bnad->msix_num);
2674 /* Not enough MSI-X vectors. */ 2672 if (ret < 0) {
2673 goto intx_mode;
2674 } else if (ret < bnad->msix_num) {
2675 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", 2675 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2676 ret, bnad->msix_num); 2676 ret, bnad->msix_num);
2677 2677
@@ -2684,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad)
2684 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + 2684 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2685 BNAD_MAILBOX_MSIX_VECTORS; 2685 BNAD_MAILBOX_MSIX_VECTORS;
2686 2686
2687 if (bnad->msix_num > ret) 2687 if (bnad->msix_num > ret) {
2688 pci_disable_msix(bnad->pcidev);
2688 goto intx_mode; 2689 goto intx_mode;
2689 2690 }
2690 /* Try once more with adjusted numbers */ 2691 }
2691 /* If this fails, fall back to INTx */
2692 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2693 bnad->msix_num);
2694 if (ret)
2695 goto intx_mode;
2696
2697 } else if (ret < 0)
2698 goto intx_mode;
2699 2692
2700 pci_intx(bnad->pcidev, 0); 2693 pci_intx(bnad->pcidev, 0);
2701 2694
@@ -2850,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2850 } 2843 }
2851 if (unlikely((gso_size + skb_transport_offset(skb) + 2844 if (unlikely((gso_size + skb_transport_offset(skb) +
2852 tcp_hdrlen(skb)) >= skb->len)) { 2845 tcp_hdrlen(skb)) >= skb->len)) {
2853 txqent->hdr.wi.opcode = 2846 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2854 __constant_htons(BNA_TXQ_WI_SEND);
2855 txqent->hdr.wi.lso_mss = 0; 2847 txqent->hdr.wi.lso_mss = 0;
2856 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); 2848 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2857 } else { 2849 } else {
2858 txqent->hdr.wi.opcode = 2850 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2859 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2860 txqent->hdr.wi.lso_mss = htons(gso_size); 2851 txqent->hdr.wi.lso_mss = htons(gso_size);
2861 } 2852 }
2862 2853
@@ -2870,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2870 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( 2861 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2871 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); 2862 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2872 } else { 2863 } else {
2873 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); 2864 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2874 txqent->hdr.wi.lso_mss = 0; 2865 txqent->hdr.wi.lso_mss = 0;
2875 2866
2876 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { 2867 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
@@ -2881,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2881 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2872 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2882 u8 proto = 0; 2873 u8 proto = 0;
2883 2874
2884 if (skb->protocol == __constant_htons(ETH_P_IP)) 2875 if (skb->protocol == htons(ETH_P_IP))
2885 proto = ip_hdr(skb)->protocol; 2876 proto = ip_hdr(skb)->protocol;
2886#ifdef NETIF_F_IPV6_CSUM 2877#ifdef NETIF_F_IPV6_CSUM
2887 else if (skb->protocol == 2878 else if (skb->protocol == htons(ETH_P_IPV6)) {
2888 __constant_htons(ETH_P_IPV6)) {
2889 /* nexthdr may not be TCP immediately. */ 2879 /* nexthdr may not be TCP immediately. */
2890 proto = ipv6_hdr(skb)->nexthdr; 2880 proto = ipv6_hdr(skb)->nexthdr;
2891 } 2881 }
@@ -2954,17 +2944,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2954 /* Sanity checks for the skb */ 2944 /* Sanity checks for the skb */
2955 2945
2956 if (unlikely(skb->len <= ETH_HLEN)) { 2946 if (unlikely(skb->len <= ETH_HLEN)) {
2957 dev_kfree_skb(skb); 2947 dev_kfree_skb_any(skb);
2958 BNAD_UPDATE_CTR(bnad, tx_skb_too_short); 2948 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2959 return NETDEV_TX_OK; 2949 return NETDEV_TX_OK;
2960 } 2950 }
2961 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { 2951 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2962 dev_kfree_skb(skb); 2952 dev_kfree_skb_any(skb);
2963 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); 2953 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2964 return NETDEV_TX_OK; 2954 return NETDEV_TX_OK;
2965 } 2955 }
2966 if (unlikely(len == 0)) { 2956 if (unlikely(len == 0)) {
2967 dev_kfree_skb(skb); 2957 dev_kfree_skb_any(skb);
2968 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); 2958 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2969 return NETDEV_TX_OK; 2959 return NETDEV_TX_OK;
2970 } 2960 }
@@ -2976,7 +2966,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2976 * and the netif_tx_stop_all_queues() call. 2966 * and the netif_tx_stop_all_queues() call.
2977 */ 2967 */
2978 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { 2968 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2979 dev_kfree_skb(skb); 2969 dev_kfree_skb_any(skb);
2980 BNAD_UPDATE_CTR(bnad, tx_skb_stopping); 2970 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2981 return NETDEV_TX_OK; 2971 return NETDEV_TX_OK;
2982 } 2972 }
@@ -2989,7 +2979,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2989 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ 2979 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2990 2980
2991 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { 2981 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2992 dev_kfree_skb(skb); 2982 dev_kfree_skb_any(skb);
2993 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); 2983 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2994 return NETDEV_TX_OK; 2984 return NETDEV_TX_OK;
2995 } 2985 }
@@ -3029,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3029 3019
3030 /* Program the opcode, flags, frame_len, num_vectors in WI */ 3020 /* Program the opcode, flags, frame_len, num_vectors in WI */
3031 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { 3021 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3032 dev_kfree_skb(skb); 3022 dev_kfree_skb_any(skb);
3033 return NETDEV_TX_OK; 3023 return NETDEV_TX_OK;
3034 } 3024 }
3035 txqent->hdr.wi.reserved = 0; 3025 txqent->hdr.wi.reserved = 0;
@@ -3055,7 +3045,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3055 /* Undo the changes starting at tcb->producer_index */ 3045 /* Undo the changes starting at tcb->producer_index */
3056 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, 3046 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3057 tcb->producer_index); 3047 tcb->producer_index);
3058 dev_kfree_skb(skb); 3048 dev_kfree_skb_any(skb);
3059 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); 3049 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3060 return NETDEV_TX_OK; 3050 return NETDEV_TX_OK;
3061 } 3051 }
@@ -3067,8 +3057,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3067 vect_id = 0; 3057 vect_id = 0;
3068 BNA_QE_INDX_INC(prod, q_depth); 3058 BNA_QE_INDX_INC(prod, q_depth);
3069 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; 3059 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3070 txqent->hdr.wi_ext.opcode = 3060 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3071 __constant_htons(BNA_TXQ_WI_EXTENSION);
3072 unmap = &unmap_q[prod]; 3061 unmap = &unmap_q[prod];
3073 } 3062 }
3074 3063
@@ -3085,7 +3074,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3085 if (unlikely(len != skb->len)) { 3074 if (unlikely(len != skb->len)) {
3086 /* Undo the changes starting at tcb->producer_index */ 3075 /* Undo the changes starting at tcb->producer_index */
3087 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); 3076 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3088 dev_kfree_skb(skb); 3077 dev_kfree_skb_any(skb);
3089 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); 3078 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3090 return NETDEV_TX_OK; 3079 return NETDEV_TX_OK;
3091 } 3080 }
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d0c38e01e99f..ca97005e24b4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -199,11 +199,6 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
199 return 0; 199 return 0;
200} 200}
201 201
202static int macb_mdio_reset(struct mii_bus *bus)
203{
204 return 0;
205}
206
207/** 202/**
208 * macb_set_tx_clk() - Set a clock to a new frequency 203 * macb_set_tx_clk() - Set a clock to a new frequency
209 * @clk Pointer to the clock to change 204 * @clk Pointer to the clock to change
@@ -375,7 +370,6 @@ int macb_mii_init(struct macb *bp)
375 bp->mii_bus->name = "MACB_mii_bus"; 370 bp->mii_bus->name = "MACB_mii_bus";
376 bp->mii_bus->read = &macb_mdio_read; 371 bp->mii_bus->read = &macb_mdio_read;
377 bp->mii_bus->write = &macb_mdio_write; 372 bp->mii_bus->write = &macb_mdio_write;
378 bp->mii_bus->reset = &macb_mdio_reset;
379 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 373 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
380 bp->pdev->name, bp->pdev->id); 374 bp->pdev->name, bp->pdev->id);
381 bp->mii_bus->priv = bp; 375 bp->mii_bus->priv = bp;
@@ -1045,7 +1039,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1045 mapping = dma_map_single(&bp->pdev->dev, skb->data, 1039 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1046 len, DMA_TO_DEVICE); 1040 len, DMA_TO_DEVICE);
1047 if (dma_mapping_error(&bp->pdev->dev, mapping)) { 1041 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
1048 kfree_skb(skb); 1042 dev_kfree_skb_any(skb);
1049 goto unlock; 1043 goto unlock;
1050 } 1044 }
1051 1045
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index d2a183c3a6ce..521dfea44b83 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
897 /* Check tx error on the last segment */ 897 /* Check tx error on the last segment */
898 if (desc_get_tx_ls(p)) { 898 if (desc_get_tx_ls(p)) {
899 desc_get_tx_status(priv, p); 899 desc_get_tx_status(priv, p);
900 dev_kfree_skb(skb); 900 dev_consume_skb_any(skb);
901 } 901 }
902 902
903 priv->tx_skbuff[entry] = NULL; 903 priv->tx_skbuff[entry] = NULL;
@@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1105 len = skb_headlen(skb); 1105 len = skb_headlen(skb);
1106 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); 1106 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
1107 if (dma_mapping_error(priv->device, paddr)) { 1107 if (dma_mapping_error(priv->device, paddr)) {
1108 dev_kfree_skb(skb); 1108 dev_kfree_skb_any(skb);
1109 return NETDEV_TX_OK; 1109 return NETDEV_TX_OK;
1110 } 1110 }
1111 priv->tx_skbuff[entry] = skb; 1111 priv->tx_skbuff[entry] = skb;
@@ -1169,7 +1169,7 @@ dma_err:
1169 desc = first; 1169 desc = first;
1170 dma_unmap_single(priv->device, desc_get_buf_addr(desc), 1170 dma_unmap_single(priv->device, desc_get_buf_addr(desc),
1171 desc_get_buf_len(desc), DMA_TO_DEVICE); 1171 desc_get_buf_len(desc), DMA_TO_DEVICE);
1172 dev_kfree_skb(skb); 1172 dev_kfree_skb_any(skb);
1173 return NETDEV_TX_OK; 1173 return NETDEV_TX_OK;
1174} 1174}
1175 1175
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d77334d7d9..07bbb711b7e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
3088{ 3088{
3089 struct msix_entry entries[SGE_QSETS + 1]; 3089 struct msix_entry entries[SGE_QSETS + 1];
3090 int vectors; 3090 int vectors;
3091 int i, err; 3091 int i;
3092 3092
3093 vectors = ARRAY_SIZE(entries); 3093 vectors = ARRAY_SIZE(entries);
3094 for (i = 0; i < vectors; ++i) 3094 for (i = 0; i < vectors; ++i)
3095 entries[i].entry = i; 3095 entries[i].entry = i;
3096 3096
3097 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) 3097 vectors = pci_enable_msix_range(adap->pdev, entries,
3098 vectors = err; 3098 adap->params.nports + 1, vectors);
3099 3099 if (vectors < 0)
3100 if (err < 0) 3100 return vectors;
3101 pci_disable_msix(adap->pdev);
3102
3103 if (!err && vectors < (adap->params.nports + 1)) {
3104 pci_disable_msix(adap->pdev);
3105 err = -1;
3106 }
3107 3101
3108 if (!err) { 3102 for (i = 0; i < vectors; ++i)
3109 for (i = 0; i < vectors; ++i) 3103 adap->msix_info[i].vec = entries[i].vector;
3110 adap->msix_info[i].vec = entries[i].vector; 3104 adap->msix_nvectors = vectors;
3111 adap->msix_nvectors = vectors;
3112 }
3113 3105
3114 return err; 3106 return 0;
3115} 3107}
3116 3108
3117static void print_port_info(struct adapter *adap, const struct adapter_info *ai) 3109static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 632b318eb38a..8b069f96e920 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
298 if (need_unmap) 298 if (need_unmap)
299 unmap_skb(d->skb, q, cidx, pdev); 299 unmap_skb(d->skb, q, cidx, pdev);
300 if (d->eop) { 300 if (d->eop) {
301 kfree_skb(d->skb); 301 dev_consume_skb_any(d->skb);
302 d->skb = NULL; 302 d->skb = NULL;
303 } 303 }
304 } 304 }
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1188 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | 1188 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1189 V_WR_TID(q->token)); 1189 V_WR_TID(q->token));
1190 wr_gen2(d, gen); 1190 wr_gen2(d, gen);
1191 kfree_skb(skb); 1191 dev_consume_skb_any(skb);
1192 return; 1192 return;
1193 } 1193 }
1194 1194
@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1233 * anything shorter than an Ethernet header. 1233 * anything shorter than an Ethernet header.
1234 */ 1234 */
1235 if (unlikely(skb->len < ETH_HLEN)) { 1235 if (unlikely(skb->len < ETH_HLEN)) {
1236 dev_kfree_skb(skb); 1236 dev_kfree_skb_any(skb);
1237 return NETDEV_TX_OK; 1237 return NETDEV_TX_OK;
1238 } 1238 }
1239 1239
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b30b9ed..32db37709263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
66 SERNUM_LEN = 24, /* Serial # length */ 66 SERNUM_LEN = 24, /* Serial # length */
67 EC_LEN = 16, /* E/C length */ 67 EC_LEN = 16, /* E/C length */
68 ID_LEN = 16, /* ID length */ 68 ID_LEN = 16, /* ID length */
69 PN_LEN = 16, /* Part Number length */
69}; 70};
70 71
71enum { 72enum {
@@ -254,6 +255,7 @@ struct vpd_params {
254 u8 ec[EC_LEN + 1]; 255 u8 ec[EC_LEN + 1];
255 u8 sn[SERNUM_LEN + 1]; 256 u8 sn[SERNUM_LEN + 1];
256 u8 id[ID_LEN + 1]; 257 u8 id[ID_LEN + 1];
258 u8 pn[PN_LEN + 1];
257}; 259};
258 260
259struct pci_params { 261struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
306 unsigned char bypass; 308 unsigned char bypass;
307 309
308 unsigned int ofldq_wr_cred; 310 unsigned int ofldq_wr_cred;
311 bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
309}; 312};
310 313
311#include "t4fw_api.h" 314#include "t4fw_api.h"
@@ -497,6 +500,7 @@ struct sge_txq {
497 spinlock_t db_lock; 500 spinlock_t db_lock;
498 int db_disabled; 501 int db_disabled;
499 unsigned short db_pidx; 502 unsigned short db_pidx;
503 unsigned short db_pidx_inc;
500 u64 udb; 504 u64 udb;
501}; 505};
502 506
@@ -553,8 +557,13 @@ struct sge {
553 u32 pktshift; /* padding between CPL & packet data */ 557 u32 pktshift; /* padding between CPL & packet data */
554 u32 fl_align; /* response queue message alignment */ 558 u32 fl_align; /* response queue message alignment */
555 u32 fl_starve_thres; /* Free List starvation threshold */ 559 u32 fl_starve_thres; /* Free List starvation threshold */
556 unsigned int starve_thres; 560
557 u8 idma_state[2]; 561 /* State variables for detecting an SGE Ingress DMA hang */
562 unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
563 unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
564 unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
565 unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
566
558 unsigned int egr_start; 567 unsigned int egr_start;
559 unsigned int ingr_start; 568 unsigned int ingr_start;
560 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ 569 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
@@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
957 u64 *parity); 966 u64 *parity);
958int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 967int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
959 u64 *parity); 968 u64 *parity);
960 969const char *t4_get_port_type_description(enum fw_port_type port_type);
961void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 970void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
962void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 971void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
963void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 972void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
@@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter);
1029int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len); 1038int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
1030int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, 1039int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
1031 u32 addr, u32 val); 1040 u32 addr, u32 val);
1041void t4_sge_decode_idma_state(struct adapter *adapter, int state);
1032#endif /* __CXGB4_H__ */ 1042#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 34e2488767d9..6fe58913403a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
254 CH_DEVICE(0x5011, 4), 254 CH_DEVICE(0x5011, 4),
255 CH_DEVICE(0x5012, 4), 255 CH_DEVICE(0x5012, 4),
256 CH_DEVICE(0x5013, 4), 256 CH_DEVICE(0x5013, 4),
257 CH_DEVICE(0x5014, 4),
258 CH_DEVICE(0x5015, 4),
259 CH_DEVICE(0x5080, 4),
260 CH_DEVICE(0x5081, 4),
261 CH_DEVICE(0x5082, 4),
262 CH_DEVICE(0x5083, 4),
263 CH_DEVICE(0x5084, 4),
264 CH_DEVICE(0x5085, 4),
257 CH_DEVICE(0x5401, 4), 265 CH_DEVICE(0x5401, 4),
258 CH_DEVICE(0x5402, 4), 266 CH_DEVICE(0x5402, 4),
259 CH_DEVICE(0x5403, 4), 267 CH_DEVICE(0x5403, 4),
@@ -273,6 +281,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
273 CH_DEVICE(0x5411, 4), 281 CH_DEVICE(0x5411, 4),
274 CH_DEVICE(0x5412, 4), 282 CH_DEVICE(0x5412, 4),
275 CH_DEVICE(0x5413, 4), 283 CH_DEVICE(0x5413, 4),
284 CH_DEVICE(0x5414, 4),
285 CH_DEVICE(0x5415, 4),
286 CH_DEVICE(0x5480, 4),
287 CH_DEVICE(0x5481, 4),
288 CH_DEVICE(0x5482, 4),
289 CH_DEVICE(0x5483, 4),
290 CH_DEVICE(0x5484, 4),
291 CH_DEVICE(0x5485, 4),
276 { 0, } 292 { 0, }
277}; 293};
278 294
@@ -423,15 +439,18 @@ static void link_report(struct net_device *dev)
423 const struct port_info *p = netdev_priv(dev); 439 const struct port_info *p = netdev_priv(dev);
424 440
425 switch (p->link_cfg.speed) { 441 switch (p->link_cfg.speed) {
426 case SPEED_10000: 442 case 10000:
427 s = "10Gbps"; 443 s = "10Gbps";
428 break; 444 break;
429 case SPEED_1000: 445 case 1000:
430 s = "1000Mbps"; 446 s = "1000Mbps";
431 break; 447 break;
432 case SPEED_100: 448 case 100:
433 s = "100Mbps"; 449 s = "100Mbps";
434 break; 450 break;
451 case 40000:
452 s = "40Gbps";
453 break;
435 } 454 }
436 455
437 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, 456 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2080,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
2061 0x40200, 0x40298, 2080 0x40200, 0x40298,
2062 0x402ac, 0x4033c, 2081 0x402ac, 0x4033c,
2063 0x403f8, 0x403fc, 2082 0x403f8, 0x403fc,
2064 0x41300, 0x413c4, 2083 0x41304, 0x413c4,
2065 0x41400, 0x4141c, 2084 0x41400, 0x4141c,
2066 0x41480, 0x414d0, 2085 0x41480, 0x414d0,
2067 0x44000, 0x44078, 2086 0x44000, 0x44078,
@@ -2089,7 +2108,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
2089 0x48200, 0x48298, 2108 0x48200, 0x48298,
2090 0x482ac, 0x4833c, 2109 0x482ac, 0x4833c,
2091 0x483f8, 0x483fc, 2110 0x483f8, 0x483fc,
2092 0x49300, 0x493c4, 2111 0x49304, 0x493c4,
2093 0x49400, 0x4941c, 2112 0x49400, 0x4941c,
2094 0x49480, 0x494d0, 2113 0x49480, 0x494d0,
2095 0x4c000, 0x4c078, 2114 0x4c000, 0x4c078,
@@ -2199,6 +2218,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2199 else if (type == FW_PORT_TYPE_FIBER_XFI || 2218 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2200 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) 2219 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2201 v |= SUPPORTED_FIBRE; 2220 v |= SUPPORTED_FIBRE;
2221 else if (type == FW_PORT_TYPE_BP40_BA)
2222 v |= SUPPORTED_40000baseSR4_Full;
2202 2223
2203 if (caps & FW_PORT_CAP_ANEG) 2224 if (caps & FW_PORT_CAP_ANEG)
2204 v |= SUPPORTED_Autoneg; 2225 v |= SUPPORTED_Autoneg;
@@ -2215,6 +2236,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
2215 v |= FW_PORT_CAP_SPEED_1G; 2236 v |= FW_PORT_CAP_SPEED_1G;
2216 if (caps & ADVERTISED_10000baseT_Full) 2237 if (caps & ADVERTISED_10000baseT_Full)
2217 v |= FW_PORT_CAP_SPEED_10G; 2238 v |= FW_PORT_CAP_SPEED_10G;
2239 if (caps & ADVERTISED_40000baseSR4_Full)
2240 v |= FW_PORT_CAP_SPEED_40G;
2218 return v; 2241 return v;
2219} 2242}
2220 2243
@@ -2263,12 +2286,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2263 2286
2264static unsigned int speed_to_caps(int speed) 2287static unsigned int speed_to_caps(int speed)
2265{ 2288{
2266 if (speed == SPEED_100) 2289 if (speed == 100)
2267 return FW_PORT_CAP_SPEED_100M; 2290 return FW_PORT_CAP_SPEED_100M;
2268 if (speed == SPEED_1000) 2291 if (speed == 1000)
2269 return FW_PORT_CAP_SPEED_1G; 2292 return FW_PORT_CAP_SPEED_1G;
2270 if (speed == SPEED_10000) 2293 if (speed == 10000)
2271 return FW_PORT_CAP_SPEED_10G; 2294 return FW_PORT_CAP_SPEED_10G;
2295 if (speed == 40000)
2296 return FW_PORT_CAP_SPEED_40G;
2272 return 0; 2297 return 0;
2273} 2298}
2274 2299
@@ -2296,8 +2321,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2296 if (cmd->autoneg == AUTONEG_DISABLE) { 2321 if (cmd->autoneg == AUTONEG_DISABLE) {
2297 cap = speed_to_caps(speed); 2322 cap = speed_to_caps(speed);
2298 2323
2299 if (!(lc->supported & cap) || (speed == SPEED_1000) || 2324 if (!(lc->supported & cap) ||
2300 (speed == SPEED_10000)) 2325 (speed == 1000) ||
2326 (speed == 10000) ||
2327 (speed == 40000))
2301 return -EINVAL; 2328 return -EINVAL;
2302 lc->requested_speed = cap; 2329 lc->requested_speed = cap;
2303 lc->advertising = 0; 2330 lc->advertising = 0;
@@ -3205,8 +3232,8 @@ static int cxgb4_clip_get(const struct net_device *dev,
3205 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | 3232 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3206 FW_CMD_REQUEST | FW_CMD_WRITE); 3233 FW_CMD_REQUEST | FW_CMD_WRITE);
3207 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); 3234 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3208 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); 3235 c.ip_hi = *(__be64 *)(lip->s6_addr);
3209 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); 3236 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3210 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); 3237 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3211} 3238}
3212 3239
@@ -3221,8 +3248,8 @@ static int cxgb4_clip_release(const struct net_device *dev,
3221 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | 3248 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3222 FW_CMD_REQUEST | FW_CMD_READ); 3249 FW_CMD_REQUEST | FW_CMD_READ);
3223 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); 3250 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3224 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); 3251 c.ip_hi = *(__be64 *)(lip->s6_addr);
3225 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); 3252 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3226 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); 3253 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3227} 3254}
3228 3255
@@ -3563,14 +3590,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
3563 3590
3564static void disable_txq_db(struct sge_txq *q) 3591static void disable_txq_db(struct sge_txq *q)
3565{ 3592{
3566 spin_lock_irq(&q->db_lock); 3593 unsigned long flags;
3594
3595 spin_lock_irqsave(&q->db_lock, flags);
3567 q->db_disabled = 1; 3596 q->db_disabled = 1;
3568 spin_unlock_irq(&q->db_lock); 3597 spin_unlock_irqrestore(&q->db_lock, flags);
3569} 3598}
3570 3599
3571static void enable_txq_db(struct sge_txq *q) 3600static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3572{ 3601{
3573 spin_lock_irq(&q->db_lock); 3602 spin_lock_irq(&q->db_lock);
3603 if (q->db_pidx_inc) {
3604 /* Make sure that all writes to the TX descriptors
3605 * are committed before we tell HW about them.
3606 */
3607 wmb();
3608 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3609 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3610 q->db_pidx_inc = 0;
3611 }
3574 q->db_disabled = 0; 3612 q->db_disabled = 0;
3575 spin_unlock_irq(&q->db_lock); 3613 spin_unlock_irq(&q->db_lock);
3576} 3614}
@@ -3592,11 +3630,32 @@ static void enable_dbs(struct adapter *adap)
3592 int i; 3630 int i;
3593 3631
3594 for_each_ethrxq(&adap->sge, i) 3632 for_each_ethrxq(&adap->sge, i)
3595 enable_txq_db(&adap->sge.ethtxq[i].q); 3633 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3596 for_each_ofldrxq(&adap->sge, i) 3634 for_each_ofldrxq(&adap->sge, i)
3597 enable_txq_db(&adap->sge.ofldtxq[i].q); 3635 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3598 for_each_port(adap, i) 3636 for_each_port(adap, i)
3599 enable_txq_db(&adap->sge.ctrlq[i].q); 3637 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3638}
3639
3640static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3641{
3642 if (adap->uld_handle[CXGB4_ULD_RDMA])
3643 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3644 cmd);
3645}
3646
3647static void process_db_full(struct work_struct *work)
3648{
3649 struct adapter *adap;
3650
3651 adap = container_of(work, struct adapter, db_full_task);
3652
3653 drain_db_fifo(adap, dbfifo_drain_delay);
3654 enable_dbs(adap);
3655 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3656 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3657 DBFIFO_HP_INT | DBFIFO_LP_INT,
3658 DBFIFO_HP_INT | DBFIFO_LP_INT);
3600} 3659}
3601 3660
3602static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) 3661static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3604,7 +3663,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3604 u16 hw_pidx, hw_cidx; 3663 u16 hw_pidx, hw_cidx;
3605 int ret; 3664 int ret;
3606 3665
3607 spin_lock_bh(&q->db_lock); 3666 spin_lock_irq(&q->db_lock);
3608 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); 3667 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3609 if (ret) 3668 if (ret)
3610 goto out; 3669 goto out;
@@ -3621,7 +3680,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3621 } 3680 }
3622out: 3681out:
3623 q->db_disabled = 0; 3682 q->db_disabled = 0;
3624 spin_unlock_bh(&q->db_lock); 3683 q->db_pidx_inc = 0;
3684 spin_unlock_irq(&q->db_lock);
3625 if (ret) 3685 if (ret)
3626 CH_WARN(adap, "DB drop recovery failed.\n"); 3686 CH_WARN(adap, "DB drop recovery failed.\n");
3627} 3687}
@@ -3637,29 +3697,6 @@ static void recover_all_queues(struct adapter *adap)
3637 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); 3697 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3638} 3698}
3639 3699
3640static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3641{
3642 mutex_lock(&uld_mutex);
3643 if (adap->uld_handle[CXGB4_ULD_RDMA])
3644 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3645 cmd);
3646 mutex_unlock(&uld_mutex);
3647}
3648
3649static void process_db_full(struct work_struct *work)
3650{
3651 struct adapter *adap;
3652
3653 adap = container_of(work, struct adapter, db_full_task);
3654
3655 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3656 drain_db_fifo(adap, dbfifo_drain_delay);
3657 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3658 DBFIFO_HP_INT | DBFIFO_LP_INT,
3659 DBFIFO_HP_INT | DBFIFO_LP_INT);
3660 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3661}
3662
3663static void process_db_drop(struct work_struct *work) 3700static void process_db_drop(struct work_struct *work)
3664{ 3701{
3665 struct adapter *adap; 3702 struct adapter *adap;
@@ -3667,11 +3704,13 @@ static void process_db_drop(struct work_struct *work)
3667 adap = container_of(work, struct adapter, db_drop_task); 3704 adap = container_of(work, struct adapter, db_drop_task);
3668 3705
3669 if (is_t4(adap->params.chip)) { 3706 if (is_t4(adap->params.chip)) {
3670 disable_dbs(adap); 3707 drain_db_fifo(adap, dbfifo_drain_delay);
3671 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); 3708 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3672 drain_db_fifo(adap, 1); 3709 drain_db_fifo(adap, dbfifo_drain_delay);
3673 recover_all_queues(adap); 3710 recover_all_queues(adap);
3711 drain_db_fifo(adap, dbfifo_drain_delay);
3674 enable_dbs(adap); 3712 enable_dbs(adap);
3713 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3675 } else { 3714 } else {
3676 u32 dropped_db = t4_read_reg(adap, 0x010ac); 3715 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3677 u16 qid = (dropped_db >> 15) & 0x1ffff; 3716 u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3712,6 +3751,8 @@ static void process_db_drop(struct work_struct *work)
3712void t4_db_full(struct adapter *adap) 3751void t4_db_full(struct adapter *adap)
3713{ 3752{
3714 if (is_t4(adap->params.chip)) { 3753 if (is_t4(adap->params.chip)) {
3754 disable_dbs(adap);
3755 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3715 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3756 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3716 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3757 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3717 queue_work(workq, &adap->db_full_task); 3758 queue_work(workq, &adap->db_full_task);
@@ -3720,8 +3761,11 @@ void t4_db_full(struct adapter *adap)
3720 3761
3721void t4_db_dropped(struct adapter *adap) 3762void t4_db_dropped(struct adapter *adap)
3722{ 3763{
3723 if (is_t4(adap->params.chip)) 3764 if (is_t4(adap->params.chip)) {
3724 queue_work(workq, &adap->db_drop_task); 3765 disable_dbs(adap);
3766 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3767 }
3768 queue_work(workq, &adap->db_drop_task);
3725} 3769}
3726 3770
3727static void uld_attach(struct adapter *adap, unsigned int uld) 3771static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3765,6 +3809,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3765 lli.dbfifo_int_thresh = dbfifo_int_thresh; 3809 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3766 lli.sge_pktshift = adap->sge.pktshift; 3810 lli.sge_pktshift = adap->sge.pktshift;
3767 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; 3811 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3812 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
3768 3813
3769 handle = ulds[uld].add(&lli); 3814 handle = ulds[uld].add(&lli);
3770 if (IS_ERR(handle)) { 3815 if (IS_ERR(handle)) {
@@ -5370,6 +5415,21 @@ static int adap_init0(struct adapter *adap)
5370 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); 5415 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5371 5416
5372 /* 5417 /*
5418 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5419 * capability. Earlier versions of the firmware didn't have the
5420 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5421 * permission to use ULPTX MEMWRITE DSGL.
5422 */
5423 if (is_t4(adap->params.chip)) {
5424 adap->params.ulptx_memwrite_dsgl = false;
5425 } else {
5426 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5427 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5428 1, params, val);
5429 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5430 }
5431
5432 /*
5373 * Get device capabilities so we can determine what resources we need 5433 * Get device capabilities so we can determine what resources we need
5374 * to manage. 5434 * to manage.
5375 */ 5435 */
@@ -5603,9 +5663,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
5603 .resume = eeh_resume, 5663 .resume = eeh_resume,
5604}; 5664};
5605 5665
5606static inline bool is_10g_port(const struct link_config *lc) 5666static inline bool is_x_10g_port(const struct link_config *lc)
5607{ 5667{
5608 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; 5668 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5669 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5609} 5670}
5610 5671
5611static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, 5672static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5690,7 @@ static void cfg_queues(struct adapter *adap)
5629 int i, q10g = 0, n10g = 0, qidx = 0; 5690 int i, q10g = 0, n10g = 0, qidx = 0;
5630 5691
5631 for_each_port(adap, i) 5692 for_each_port(adap, i)
5632 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); 5693 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5633 5694
5634 /* 5695 /*
5635 * We default to 1 queue per non-10G port and up to # of cores queues 5696 * We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5705,7 @@ static void cfg_queues(struct adapter *adap)
5644 struct port_info *pi = adap2pinfo(adap, i); 5705 struct port_info *pi = adap2pinfo(adap, i);
5645 5706
5646 pi->first_qset = qidx; 5707 pi->first_qset = qidx;
5647 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; 5708 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
5648 qidx += pi->nqsets; 5709 qidx += pi->nqsets;
5649 } 5710 }
5650 5711
@@ -5737,7 +5798,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
5737static int enable_msix(struct adapter *adap) 5798static int enable_msix(struct adapter *adap)
5738{ 5799{
5739 int ofld_need = 0; 5800 int ofld_need = 0;
5740 int i, err, want, need; 5801 int i, want, need;
5741 struct sge *s = &adap->sge; 5802 struct sge *s = &adap->sge;
5742 unsigned int nchan = adap->params.nports; 5803 unsigned int nchan = adap->params.nports;
5743 struct msix_entry entries[MAX_INGQ + 1]; 5804 struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5814,30 @@ static int enable_msix(struct adapter *adap)
5753 } 5814 }
5754 need = adap->params.nports + EXTRA_VECS + ofld_need; 5815 need = adap->params.nports + EXTRA_VECS + ofld_need;
5755 5816
5756 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) 5817 want = pci_enable_msix_range(adap->pdev, entries, need, want);
5757 want = err; 5818 if (want < 0)
5819 return want;
5758 5820
5759 if (!err) { 5821 /*
5760 /* 5822 * Distribute available vectors to the various queue groups.
5761 * Distribute available vectors to the various queue groups. 5823 * Every group gets its minimum requirement and NIC gets top
5762 * Every group gets its minimum requirement and NIC gets top 5824 * priority for leftovers.
5763 * priority for leftovers. 5825 */
5764 */ 5826 i = want - EXTRA_VECS - ofld_need;
5765 i = want - EXTRA_VECS - ofld_need; 5827 if (i < s->max_ethqsets) {
5766 if (i < s->max_ethqsets) { 5828 s->max_ethqsets = i;
5767 s->max_ethqsets = i; 5829 if (i < s->ethqsets)
5768 if (i < s->ethqsets) 5830 reduce_ethqs(adap, i);
5769 reduce_ethqs(adap, i); 5831 }
5770 } 5832 if (is_offload(adap)) {
5771 if (is_offload(adap)) { 5833 i = want - EXTRA_VECS - s->max_ethqsets;
5772 i = want - EXTRA_VECS - s->max_ethqsets; 5834 i -= ofld_need - nchan;
5773 i -= ofld_need - nchan; 5835 s->ofldqsets = (i / nchan) * nchan; /* round down */
5774 s->ofldqsets = (i / nchan) * nchan; /* round down */ 5836 }
5775 } 5837 for (i = 0; i < want; ++i)
5776 for (i = 0; i < want; ++i) 5838 adap->msix_info[i].vec = entries[i].vector;
5777 adap->msix_info[i].vec = entries[i].vector; 5839
5778 } else if (err > 0) 5840 return 0;
5779 dev_info(adap->pdev_dev,
5780 "only %d MSI-X vectors left, not using MSI-X\n", err);
5781 return err;
5782} 5841}
5783 5842
5784#undef EXTRA_VECS 5843#undef EXTRA_VECS
@@ -5801,11 +5860,6 @@ static int init_rss(struct adapter *adap)
5801 5860
5802static void print_port_info(const struct net_device *dev) 5861static void print_port_info(const struct net_device *dev)
5803{ 5862{
5804 static const char *base[] = {
5805 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
5806 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
5807 };
5808
5809 char buf[80]; 5863 char buf[80];
5810 char *bufp = buf; 5864 char *bufp = buf;
5811 const char *spd = ""; 5865 const char *spd = "";
@@ -5823,9 +5877,11 @@ static void print_port_info(const struct net_device *dev)
5823 bufp += sprintf(bufp, "1000/"); 5877 bufp += sprintf(bufp, "1000/");
5824 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) 5878 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5825 bufp += sprintf(bufp, "10G/"); 5879 bufp += sprintf(bufp, "10G/");
5880 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
5881 bufp += sprintf(bufp, "40G/");
5826 if (bufp != buf) 5882 if (bufp != buf)
5827 --bufp; 5883 --bufp;
5828 sprintf(bufp, "BASE-%s", base[pi->port_type]); 5884 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5829 5885
5830 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 5886 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5831 adap->params.vpd.id, 5887 adap->params.vpd.id,
@@ -5833,8 +5889,8 @@ static void print_port_info(const struct net_device *dev)
5833 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, 5889 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5834 (adap->flags & USING_MSIX) ? " MSI-X" : 5890 (adap->flags & USING_MSIX) ? " MSI-X" :
5835 (adap->flags & USING_MSI) ? " MSI" : ""); 5891 (adap->flags & USING_MSI) ? " MSI" : "");
5836 netdev_info(dev, "S/N: %s, E/C: %s\n", 5892 netdev_info(dev, "S/N: %s, P/N: %s\n",
5837 adap->params.vpd.sn, adap->params.vpd.ec); 5893 adap->params.vpd.sn, adap->params.vpd.pn);
5838} 5894}
5839 5895
5840static void enable_pcie_relaxed_ordering(struct pci_dev *dev) 5896static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82533e4..e274a047528f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
253 /* packet data */ 253 /* packet data */
254 bool enable_fw_ofld_conn; /* Enable connection through fw */ 254 bool enable_fw_ofld_conn; /* Enable connection through fw */
255 /* WR */ 255 /* WR */
256 bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
256}; 257};
257 258
258struct cxgb4_uld_info { 259struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64fcf19..ca95cf2954eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -93,6 +93,16 @@
93 */ 93 */
94#define TX_QCHECK_PERIOD (HZ / 2) 94#define TX_QCHECK_PERIOD (HZ / 2)
95 95
96/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
97 * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
98 * State Machines in the same state for this amount of time (in HZ) then we'll
99 * issue a warning about a potential hang. We'll repeat the warning as the
100 * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
101 * the situation clears. If the situation clears, we'll note that as well.
102 */
103#define SGE_IDMA_WARN_THRESH (1 * HZ)
104#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
105
96/* 106/*
97 * Max number of Tx descriptors to be reclaimed by the Tx timer. 107 * Max number of Tx descriptors to be reclaimed by the Tx timer.
98 */ 108 */
@@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
373 if (d->skb) { /* an SGL is present */ 383 if (d->skb) { /* an SGL is present */
374 if (unmap) 384 if (unmap)
375 unmap_sgl(dev, d->skb, d->sgl, q); 385 unmap_sgl(dev, d->skb, d->sgl, q);
376 kfree_skb(d->skb); 386 dev_consume_skb_any(d->skb);
377 d->skb = NULL; 387 d->skb = NULL;
378 } 388 }
379 ++d; 389 ++d;
@@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
706 * @skb: the packet 716 * @skb: the packet
707 * 717 *
708 * Returns whether an Ethernet packet is small enough to fit as 718 * Returns whether an Ethernet packet is small enough to fit as
709 * immediate data. 719 * immediate data. Return value corresponds to headroom required.
710 */ 720 */
711static inline int is_eth_imm(const struct sk_buff *skb) 721static inline int is_eth_imm(const struct sk_buff *skb)
712{ 722{
713 return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); 723 int hdrlen = skb_shinfo(skb)->gso_size ?
724 sizeof(struct cpl_tx_pkt_lso_core) : 0;
725
726 hdrlen += sizeof(struct cpl_tx_pkt);
727 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
728 return hdrlen;
729 return 0;
714} 730}
715 731
716/** 732/**
@@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
723static inline unsigned int calc_tx_flits(const struct sk_buff *skb) 739static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
724{ 740{
725 unsigned int flits; 741 unsigned int flits;
742 int hdrlen = is_eth_imm(skb);
726 743
727 if (is_eth_imm(skb)) 744 if (hdrlen)
728 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); 745 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
729 746
730 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; 747 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
731 if (skb_shinfo(skb)->gso_size) 748 if (skb_shinfo(skb)->gso_size)
@@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
843static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 860static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
844{ 861{
845 unsigned int *wr, index; 862 unsigned int *wr, index;
863 unsigned long flags;
846 864
847 wmb(); /* write descriptors before telling HW */ 865 wmb(); /* write descriptors before telling HW */
848 spin_lock(&q->db_lock); 866 spin_lock_irqsave(&q->db_lock, flags);
849 if (!q->db_disabled) { 867 if (!q->db_disabled) {
850 if (is_t4(adap->params.chip)) { 868 if (is_t4(adap->params.chip)) {
851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 869 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
861 writel(n, adap->bar2 + q->udb + 8); 879 writel(n, adap->bar2 + q->udb + 8);
862 wmb(); 880 wmb();
863 } 881 }
864 } 882 } else
883 q->db_pidx_inc += n;
865 q->db_pidx = q->pidx; 884 q->db_pidx = q->pidx;
866 spin_unlock(&q->db_lock); 885 spin_unlock_irqrestore(&q->db_lock, flags);
867} 886}
868 887
869/** 888/**
@@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
971 */ 990 */
972netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) 991netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
973{ 992{
993 int len;
974 u32 wr_mid; 994 u32 wr_mid;
975 u64 cntrl, *end; 995 u64 cntrl, *end;
976 int qidx, credits; 996 int qidx, credits;
@@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
982 struct cpl_tx_pkt_core *cpl; 1002 struct cpl_tx_pkt_core *cpl;
983 const struct skb_shared_info *ssi; 1003 const struct skb_shared_info *ssi;
984 dma_addr_t addr[MAX_SKB_FRAGS + 1]; 1004 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1005 bool immediate = false;
985 1006
986 /* 1007 /*
987 * The chip min packet length is 10 octets but play safe and reject 1008 * The chip min packet length is 10 octets but play safe and reject
988 * anything shorter than an Ethernet header. 1009 * anything shorter than an Ethernet header.
989 */ 1010 */
990 if (unlikely(skb->len < ETH_HLEN)) { 1011 if (unlikely(skb->len < ETH_HLEN)) {
991out_free: dev_kfree_skb(skb); 1012out_free: dev_kfree_skb_any(skb);
992 return NETDEV_TX_OK; 1013 return NETDEV_TX_OK;
993 } 1014 }
994 1015
@@ -1011,7 +1032,10 @@ out_free: dev_kfree_skb(skb);
1011 return NETDEV_TX_BUSY; 1032 return NETDEV_TX_BUSY;
1012 } 1033 }
1013 1034
1014 if (!is_eth_imm(skb) && 1035 if (is_eth_imm(skb))
1036 immediate = true;
1037
1038 if (!immediate &&
1015 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { 1039 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1016 q->mapping_err++; 1040 q->mapping_err++;
1017 goto out_free; 1041 goto out_free;
@@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb);
1028 wr->r3 = cpu_to_be64(0); 1052 wr->r3 = cpu_to_be64(0);
1029 end = (u64 *)wr + flits; 1053 end = (u64 *)wr + flits;
1030 1054
1055 len = immediate ? skb->len : 0;
1031 ssi = skb_shinfo(skb); 1056 ssi = skb_shinfo(skb);
1032 if (ssi->gso_size) { 1057 if (ssi->gso_size) {
1033 struct cpl_tx_pkt_lso *lso = (void *)wr; 1058 struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb);
1035 int l3hdr_len = skb_network_header_len(skb); 1060 int l3hdr_len = skb_network_header_len(skb);
1036 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; 1061 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1037 1062
1063 len += sizeof(*lso);
1038 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | 1064 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1039 FW_WR_IMMDLEN(sizeof(*lso))); 1065 FW_WR_IMMDLEN(len));
1040 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | 1066 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1041 LSO_FIRST_SLICE | LSO_LAST_SLICE | 1067 LSO_FIRST_SLICE | LSO_LAST_SLICE |
1042 LSO_IPV6(v6) | 1068 LSO_IPV6(v6) |
@@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb);
1054 q->tso++; 1080 q->tso++;
1055 q->tx_cso += ssi->gso_segs; 1081 q->tx_cso += ssi->gso_segs;
1056 } else { 1082 } else {
1057 int len; 1083 len += sizeof(*cpl);
1058
1059 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1060 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | 1084 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1061 FW_WR_IMMDLEN(len)); 1085 FW_WR_IMMDLEN(len));
1062 cpl = (void *)(wr + 1); 1086 cpl = (void *)(wr + 1);
@@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb);
1078 cpl->len = htons(skb->len); 1102 cpl->len = htons(skb->len);
1079 cpl->ctrl1 = cpu_to_be64(cntrl); 1103 cpl->ctrl1 = cpu_to_be64(cntrl);
1080 1104
1081 if (is_eth_imm(skb)) { 1105 if (immediate) {
1082 inline_tx_skb(skb, &q->q, cpl + 1); 1106 inline_tx_skb(skb, &q->q, cpl + 1);
1083 dev_kfree_skb(skb); 1107 dev_consume_skb_any(skb);
1084 } else { 1108 } else {
1085 int last_desc; 1109 int last_desc;
1086 1110
@@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1467{ 1491{
1468 unsigned int idx = skb_txq(skb); 1492 unsigned int idx = skb_txq(skb);
1469 1493
1470 if (unlikely(is_ctrl_pkt(skb))) 1494 if (unlikely(is_ctrl_pkt(skb))) {
1495 /* Single ctrl queue is a requirement for LE workaround path */
1496 if (adap->tids.nsftids)
1497 idx = 0;
1471 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); 1498 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1499 }
1472 return ofld_xmit(&adap->sge.ofldtxq[idx], skb); 1500 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1473} 1501}
1474 1502
@@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
1992static void sge_rx_timer_cb(unsigned long data) 2020static void sge_rx_timer_cb(unsigned long data)
1993{ 2021{
1994 unsigned long m; 2022 unsigned long m;
1995 unsigned int i, cnt[2]; 2023 unsigned int i, idma_same_state_cnt[2];
1996 struct adapter *adap = (struct adapter *)data; 2024 struct adapter *adap = (struct adapter *)data;
1997 struct sge *s = &adap->sge; 2025 struct sge *s = &adap->sge;
1998 2026
@@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data)
2015 } 2043 }
2016 2044
2017 t4_write_reg(adap, SGE_DEBUG_INDEX, 13); 2045 t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
2018 cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); 2046 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
2019 cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2047 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2020 2048
2021 for (i = 0; i < 2; i++) 2049 for (i = 0; i < 2; i++) {
2022 if (cnt[i] >= s->starve_thres) { 2050 u32 debug0, debug11;
2023 if (s->idma_state[i] || cnt[i] == 0xffffffff) 2051
2024 continue; 2052 /* If the Ingress DMA Same State Counter ("timer") is less
2025 s->idma_state[i] = 1; 2053 * than 1s, then we can reset our synthesized Stall Timer and
2026 t4_write_reg(adap, SGE_DEBUG_INDEX, 11); 2054 * continue. If we have previously emitted warnings about a
2027 m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); 2055 * potential stalled Ingress Queue, issue a note indicating
2028 dev_warn(adap->pdev_dev, 2056 * that the Ingress Queue has resumed forward progress.
2029 "SGE idma%u starvation detected for " 2057 */
2030 "queue %lu\n", i, m & 0xffff); 2058 if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
2031 } else if (s->idma_state[i]) 2059 if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
2032 s->idma_state[i] = 0; 2060 CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
2061 i, s->idma_qid[i],
2062 s->idma_stalled[i]/HZ);
2063 s->idma_stalled[i] = 0;
2064 continue;
2065 }
2066
2067 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
2068 * domain. The first time we get here it'll be because we
2069 * passed the 1s Threshold; each additional time it'll be
2070 * because the RX Timer Callback is being fired on its regular
2071 * schedule.
2072 *
2073 * If the stall is below our Potential Hung Ingress Queue
2074 * Warning Threshold, continue.
2075 */
2076 if (s->idma_stalled[i] == 0)
2077 s->idma_stalled[i] = HZ;
2078 else
2079 s->idma_stalled[i] += RX_QCHECK_PERIOD;
2080
2081 if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
2082 continue;
2083
2084 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
2085 if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
2086 continue;
2087
2088 /* Read and save the SGE IDMA State and Queue ID information.
2089 * We do this every time in case it changes across time ...
2090 */
2091 t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
2092 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2093 s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2094
2095 t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
2096 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2097 s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2098
2099 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
2100 i, s->idma_qid[i], s->idma_state[i],
2101 s->idma_stalled[i]/HZ, debug0, debug11);
2102 t4_sge_decode_idma_state(adap, s->idma_state[i]);
2103 }
2033 2104
2034 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); 2105 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2035} 2106}
@@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap)
2580 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 2651 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2581 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 2652 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2582 2653
2654 /* We only bother using the Large Page logic if the Large Page Buffer
2655 * is larger than our Page Size Buffer.
2656 */
2657 if (fl_large_pg <= fl_small_pg)
2658 fl_large_pg = 0;
2659
2583 #undef READ_FL_BUF 2660 #undef READ_FL_BUF
2584 2661
2662 /* The Page Size Buffer must be exactly equal to our Page Size and the
2663 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2664 */
2585 if (fl_small_pg != PAGE_SIZE || 2665 if (fl_small_pg != PAGE_SIZE ||
2586 (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || 2666 (fl_large_pg & (fl_large_pg-1)) != 0) {
2587 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2588 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2667 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2589 fl_small_pg, fl_large_pg); 2668 fl_small_pg, fl_large_pg);
2590 return -EINVAL; 2669 return -EINVAL;
@@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap)
2699int t4_sge_init(struct adapter *adap) 2778int t4_sge_init(struct adapter *adap)
2700{ 2779{
2701 struct sge *s = &adap->sge; 2780 struct sge *s = &adap->sge;
2702 u32 sge_control; 2781 u32 sge_control, sge_conm_ctrl;
2703 int ret; 2782 int ret, egress_threshold;
2704 2783
2705 /* 2784 /*
2706 * Ingress Padding Boundary and Egress Status Page Size are set up by 2785 * Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap)
2725 * SGE's Egress Congestion Threshold. If it isn't, then we can get 2804 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2726 * stuck waiting for new packets while the SGE is waiting for us to 2805 * stuck waiting for new packets while the SGE is waiting for us to
2727 * give it more Free List entries. (Note that the SGE's Egress 2806 * give it more Free List entries. (Note that the SGE's Egress
2728 * Congestion Threshold is in units of 2 Free List pointers.) 2807 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
2808 * there was only a single field to control this. For T5 there's the
2809 * original field which now only applies to Unpacked Mode Free List
2810 * buffers and a new field which only applies to Packed Mode Free List
2811 * buffers.
2729 */ 2812 */
2730 s->fl_starve_thres 2813 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
2731 = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1; 2814 if (is_t4(adap->params.chip))
2815 egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
2816 else
2817 egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
2818 s->fl_starve_thres = 2*egress_threshold + 1;
2732 2819
2733 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 2820 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2734 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); 2821 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2735 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ 2822 s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2736 s->idma_state[0] = s->idma_state[1] = 0; 2823 s->idma_stalled[0] = 0;
2824 s->idma_stalled[1] = 0;
2737 spin_lock_init(&s->intrq_lock); 2825 spin_lock_init(&s->intrq_lock);
2738 2826
2739 return 0; 2827 return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c109343d570..fb2fe65903c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
573{ 573{
574 u32 cclk_param, cclk_val; 574 u32 cclk_param, cclk_val;
575 int i, ret, addr; 575 int i, ret, addr;
576 int ec, sn; 576 int ec, sn, pn;
577 u8 *vpd, csum; 577 u8 *vpd, csum;
578 unsigned int vpdr_len, kw_offset, id_len; 578 unsigned int vpdr_len, kw_offset, id_len;
579 579
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
638 638
639 FIND_VPD_KW(ec, "EC"); 639 FIND_VPD_KW(ec, "EC");
640 FIND_VPD_KW(sn, "SN"); 640 FIND_VPD_KW(sn, "SN");
641 FIND_VPD_KW(pn, "PN");
641#undef FIND_VPD_KW 642#undef FIND_VPD_KW
642 643
643 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); 644 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
647 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); 648 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
648 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 649 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
649 strim(p->sn); 650 strim(p->sn);
651 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
652 strim(p->pn);
650 653
651 /* 654 /*
652 * Ask firmware for the Core Clock since it knows how to translate the 655 * Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
1155} 1158}
1156 1159
1157#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 1160#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1158 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) 1161 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1162 FW_PORT_CAP_ANEG)
1159 1163
1160/** 1164/**
1161 * t4_link_start - apply link configuration to MAC/PHY 1165 * t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2247} 2251}
2248 2252
2249/** 2253/**
2254 * t4_get_port_type_description - return Port Type string description
2255 * @port_type: firmware Port Type enumeration
2256 */
2257const char *t4_get_port_type_description(enum fw_port_type port_type)
2258{
2259 static const char *const port_type_description[] = {
2260 "R XFI",
2261 "R XAUI",
2262 "T SGMII",
2263 "T XFI",
2264 "T XAUI",
2265 "KX4",
2266 "CX4",
2267 "KX",
2268 "KR",
2269 "R SFP+",
2270 "KR/KX",
2271 "KR/KX/KX4",
2272 "R QSFP_10G",
2273 "",
2274 "R QSFP",
2275 "R BP40_BA",
2276 };
2277
2278 if (port_type < ARRAY_SIZE(port_type_description))
2279 return port_type_description[port_type];
2280 return "UNKNOWN";
2281}
2282
2283/**
2250 * t4_get_port_stats - collect port statistics 2284 * t4_get_port_stats - collect port statistics
2251 * @adap: the adapter 2285 * @adap: the adapter
2252 * @idx: the port index 2286 * @idx: the port index
@@ -2563,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2563} 2597}
2564 2598
2565/** 2599/**
2600 * t4_sge_decode_idma_state - decode the idma state
2601 * @adap: the adapter
2602 * @state: the state idma is stuck in
2603 */
2604void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2605{
2606 static const char * const t4_decode[] = {
2607 "IDMA_IDLE",
2608 "IDMA_PUSH_MORE_CPL_FIFO",
2609 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2610 "Not used",
2611 "IDMA_PHYSADDR_SEND_PCIEHDR",
2612 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2613 "IDMA_PHYSADDR_SEND_PAYLOAD",
2614 "IDMA_SEND_FIFO_TO_IMSG",
2615 "IDMA_FL_REQ_DATA_FL_PREP",
2616 "IDMA_FL_REQ_DATA_FL",
2617 "IDMA_FL_DROP",
2618 "IDMA_FL_H_REQ_HEADER_FL",
2619 "IDMA_FL_H_SEND_PCIEHDR",
2620 "IDMA_FL_H_PUSH_CPL_FIFO",
2621 "IDMA_FL_H_SEND_CPL",
2622 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2623 "IDMA_FL_H_SEND_IP_HDR",
2624 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2625 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2626 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2627 "IDMA_FL_D_SEND_PCIEHDR",
2628 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2629 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2630 "IDMA_FL_SEND_PCIEHDR",
2631 "IDMA_FL_PUSH_CPL_FIFO",
2632 "IDMA_FL_SEND_CPL",
2633 "IDMA_FL_SEND_PAYLOAD_FIRST",
2634 "IDMA_FL_SEND_PAYLOAD",
2635 "IDMA_FL_REQ_NEXT_DATA_FL",
2636 "IDMA_FL_SEND_NEXT_PCIEHDR",
2637 "IDMA_FL_SEND_PADDING",
2638 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2639 "IDMA_FL_SEND_FIFO_TO_IMSG",
2640 "IDMA_FL_REQ_DATAFL_DONE",
2641 "IDMA_FL_REQ_HEADERFL_DONE",
2642 };
2643 static const char * const t5_decode[] = {
2644 "IDMA_IDLE",
2645 "IDMA_ALMOST_IDLE",
2646 "IDMA_PUSH_MORE_CPL_FIFO",
2647 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2648 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2649 "IDMA_PHYSADDR_SEND_PCIEHDR",
2650 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2651 "IDMA_PHYSADDR_SEND_PAYLOAD",
2652 "IDMA_SEND_FIFO_TO_IMSG",
2653 "IDMA_FL_REQ_DATA_FL",
2654 "IDMA_FL_DROP",
2655 "IDMA_FL_DROP_SEND_INC",
2656 "IDMA_FL_H_REQ_HEADER_FL",
2657 "IDMA_FL_H_SEND_PCIEHDR",
2658 "IDMA_FL_H_PUSH_CPL_FIFO",
2659 "IDMA_FL_H_SEND_CPL",
2660 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2661 "IDMA_FL_H_SEND_IP_HDR",
2662 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2663 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2664 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2665 "IDMA_FL_D_SEND_PCIEHDR",
2666 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2667 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2668 "IDMA_FL_SEND_PCIEHDR",
2669 "IDMA_FL_PUSH_CPL_FIFO",
2670 "IDMA_FL_SEND_CPL",
2671 "IDMA_FL_SEND_PAYLOAD_FIRST",
2672 "IDMA_FL_SEND_PAYLOAD",
2673 "IDMA_FL_REQ_NEXT_DATA_FL",
2674 "IDMA_FL_SEND_NEXT_PCIEHDR",
2675 "IDMA_FL_SEND_PADDING",
2676 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2677 };
2678 static const u32 sge_regs[] = {
2679 SGE_DEBUG_DATA_LOW_INDEX_2,
2680 SGE_DEBUG_DATA_LOW_INDEX_3,
2681 SGE_DEBUG_DATA_HIGH_INDEX_10,
2682 };
2683 const char **sge_idma_decode;
2684 int sge_idma_decode_nstates;
2685 int i;
2686
2687 if (is_t4(adapter->params.chip)) {
2688 sge_idma_decode = (const char **)t4_decode;
2689 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2690 } else {
2691 sge_idma_decode = (const char **)t5_decode;
2692 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2693 }
2694
2695 if (state < sge_idma_decode_nstates)
2696 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2697 else
2698 CH_WARN(adapter, "idma state %d unknown\n", state);
2699
2700 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2701 CH_WARN(adapter, "SGE register %#x value %#x\n",
2702 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2703}
2704
2705/**
2566 * t4_fw_hello - establish communication with FW 2706 * t4_fw_hello - establish communication with FW
2567 * @adap: the adapter 2707 * @adap: the adapter
2568 * @mbox: mailbox to use for the FW command 2708 * @mbox: mailbox to use for the FW command
@@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3533 if (stat & FW_PORT_CMD_TXPAUSE) 3673 if (stat & FW_PORT_CMD_TXPAUSE)
3534 fc |= PAUSE_TX; 3674 fc |= PAUSE_TX;
3535 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 3675 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3536 speed = SPEED_100; 3676 speed = 100;
3537 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 3677 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3538 speed = SPEED_1000; 3678 speed = 1000;
3539 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 3679 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3540 speed = SPEED_10000; 3680 speed = 10000;
3681 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
3682 speed = 40000;
3541 3683
3542 if (link_ok != lc->link_ok || speed != lc->speed || 3684 if (link_ok != lc->link_ok || speed != lc->speed ||
3543 fc != lc->fc) { /* something changed */ 3685 fc != lc->fc) { /* something changed */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index cd6874b571ee..f2738c710789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -116,6 +116,7 @@ enum CPL_error {
116 CPL_ERR_KEEPALIVE_TIMEDOUT = 34, 116 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
117 CPL_ERR_RTX_NEG_ADVICE = 35, 117 CPL_ERR_RTX_NEG_ADVICE = 35,
118 CPL_ERR_PERSIST_NEG_ADVICE = 36, 118 CPL_ERR_PERSIST_NEG_ADVICE = 36,
119 CPL_ERR_KEEPALV_NEG_ADVICE = 37,
119 CPL_ERR_ABORT_FAILED = 42, 120 CPL_ERR_ABORT_FAILED = 42,
120 CPL_ERR_IWARP_FLM = 50, 121 CPL_ERR_IWARP_FLM = 50,
121}; 122};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4082522d8140..225ad8a5722d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -230,6 +230,12 @@
230#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift) 230#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
231#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift) 231#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
232 232
233#define EGRTHRESHOLDPACKING_MASK 0x3fU
234#define EGRTHRESHOLDPACKING_SHIFT 14
235#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
236#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
237 EGRTHRESHOLDPACKING_MASK)
238
233#define SGE_DBFIFO_STATUS 0x10a4 239#define SGE_DBFIFO_STATUS 0x10a4
234#define HP_INT_THRESH_SHIFT 28 240#define HP_INT_THRESH_SHIFT 28
235#define HP_INT_THRESH_MASK 0xfU 241#define HP_INT_THRESH_MASK 0xfU
@@ -278,6 +284,9 @@
278#define SGE_DEBUG_INDEX 0x10cc 284#define SGE_DEBUG_INDEX 0x10cc
279#define SGE_DEBUG_DATA_HIGH 0x10d0 285#define SGE_DEBUG_DATA_HIGH 0x10d0
280#define SGE_DEBUG_DATA_LOW 0x10d4 286#define SGE_DEBUG_DATA_LOW 0x10d4
287#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
288#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
289#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
281#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 290#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
282 291
283#define S_HP_INT_THRESH 28 292#define S_HP_INT_THRESH 28
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74ce0aa..9cc973fbcf26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
932 FW_PARAMS_PARAM_DEV_FWREV = 0x0B, 932 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
933 FW_PARAMS_PARAM_DEV_TPREV = 0x0C, 933 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
934 FW_PARAMS_PARAM_DEV_CF = 0x0D, 934 FW_PARAMS_PARAM_DEV_CF = 0x0D,
935 FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
935}; 936};
936 937
937/* 938/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
1742 FW_PORT_TYPE_SFP, 1743 FW_PORT_TYPE_SFP,
1743 FW_PORT_TYPE_BP_AP, 1744 FW_PORT_TYPE_BP_AP,
1744 FW_PORT_TYPE_BP4_AP, 1745 FW_PORT_TYPE_BP4_AP,
1746 FW_PORT_TYPE_QSFP_10G,
1747 FW_PORT_TYPE_QSFP,
1748 FW_PORT_TYPE_BP40_BA,
1745 1749
1746 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK 1750 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1747}; 1751};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c0983594..52859288de7b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
2444 */ 2444 */
2445static int enable_msix(struct adapter *adapter) 2445static int enable_msix(struct adapter *adapter)
2446{ 2446{
2447 int i, err, want, need; 2447 int i, want, need, nqsets;
2448 struct msix_entry entries[MSIX_ENTRIES]; 2448 struct msix_entry entries[MSIX_ENTRIES];
2449 struct sge *s = &adapter->sge; 2449 struct sge *s = &adapter->sge;
2450 2450
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
2460 */ 2460 */
2461 want = s->max_ethqsets + MSIX_EXTRAS; 2461 want = s->max_ethqsets + MSIX_EXTRAS;
2462 need = adapter->params.nports + MSIX_EXTRAS; 2462 need = adapter->params.nports + MSIX_EXTRAS;
2463 while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
2464 want = err;
2465 2463
2466 if (err == 0) { 2464 want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2467 int nqsets = want - MSIX_EXTRAS; 2465 if (want < 0)
2468 if (nqsets < s->max_ethqsets) { 2466 return want;
2469 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" 2467
2470 " for %d Queue Sets\n", nqsets); 2468 nqsets = want - MSIX_EXTRAS;
2471 s->max_ethqsets = nqsets; 2469 if (nqsets < s->max_ethqsets) {
2472 if (nqsets < s->ethqsets) 2470 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2473 reduce_ethqs(adapter, nqsets); 2471 " for %d Queue Sets\n", nqsets);
2474 } 2472 s->max_ethqsets = nqsets;
2475 for (i = 0; i < want; ++i) 2473 if (nqsets < s->ethqsets)
2476 adapter->msix_info[i].vec = entries[i].vector; 2474 reduce_ethqs(adapter, nqsets);
2477 } else if (err > 0) {
2478 pci_disable_msix(adapter->pdev);
2479 dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
2480 " not using MSI-X\n", err);
2481 } 2475 }
2482 return err; 2476 for (i = 0; i < want; ++i)
2477 adapter->msix_info[i].vec = entries[i].vector;
2478
2479 return 0;
2483} 2480}
2484 2481
2485static const struct net_device_ops cxgb4vf_netdev_ops = { 2482static const struct net_device_ops cxgb4vf_netdev_ops = {
@@ -2947,6 +2944,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
2947 CH_DEVICE(0x5811, 0), /* T520-lp-cr */ 2944 CH_DEVICE(0x5811, 0), /* T520-lp-cr */
2948 CH_DEVICE(0x5812, 0), /* T560-cr */ 2945 CH_DEVICE(0x5812, 0), /* T560-cr */
2949 CH_DEVICE(0x5813, 0), /* T580-cr */ 2946 CH_DEVICE(0x5813, 0), /* T580-cr */
2947 CH_DEVICE(0x5814, 0), /* T580-so-cr */
2948 CH_DEVICE(0x5815, 0), /* T502-bt */
2949 CH_DEVICE(0x5880, 0),
2950 CH_DEVICE(0x5881, 0),
2951 CH_DEVICE(0x5882, 0),
2952 CH_DEVICE(0x5883, 0),
2953 CH_DEVICE(0x5884, 0),
2954 CH_DEVICE(0x5885, 0),
2950 { 0, } 2955 { 0, }
2951}; 2956};
2952 2957
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0a89963c48ce..9cfa4b4bb089 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
401 if (sdesc->skb) { 401 if (sdesc->skb) {
402 if (need_unmap) 402 if (need_unmap)
403 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); 403 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
404 kfree_skb(sdesc->skb); 404 dev_consume_skb_any(sdesc->skb);
405 sdesc->skb = NULL; 405 sdesc->skb = NULL;
406 } 406 }
407 407
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1275 * need it any longer. 1275 * need it any longer.
1276 */ 1276 */
1277 inline_tx_skb(skb, &txq->q, cpl + 1); 1277 inline_tx_skb(skb, &txq->q, cpl + 1);
1278 dev_kfree_skb(skb); 1278 dev_consume_skb_any(skb);
1279 } else { 1279 } else {
1280 /* 1280 /*
1281 * Write the skb's Scatter/Gather list into the TX Packet CPL 1281 * Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
1354 * An error of some sort happened. Free the TX skb and tell the 1354 * An error of some sort happened. Free the TX skb and tell the
1355 * OS that we've "dealt" with the packet ... 1355 * OS that we've "dealt" with the packet ...
1356 */ 1356 */
1357 dev_kfree_skb(skb); 1357 dev_kfree_skb_any(skb);
1358 return NETDEV_TX_OK; 1358 return NETDEV_TX_OK;
1359} 1359}
1360 1360
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 19f642a45f40..fe84fbabc0d4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
1174 writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1); 1174 writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
1175 spin_unlock_irqrestore(&lp->lock, flags); 1175 spin_unlock_irqrestore(&lp->lock, flags);
1176 dev->stats.tx_bytes += skb->len; 1176 dev->stats.tx_bytes += skb->len;
1177 dev_kfree_skb(skb); 1177 dev_consume_skb_any(skb);
1178 1178
1179 /* We DO NOT call netif_wake_queue() here. 1179 /* We DO NOT call netif_wake_queue() here.
1180 * We also DO NOT call netif_start_queue(). 1180 * We also DO NOT call netif_start_queue().
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b740bfce72ef..2945718ce806 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
521 unsigned int txq_map; 521 unsigned int txq_map;
522 522
523 if (skb->len <= 0) { 523 if (skb->len <= 0) {
524 dev_kfree_skb(skb); 524 dev_kfree_skb_any(skb);
525 return NETDEV_TX_OK; 525 return NETDEV_TX_OK;
526 } 526 }
527 527
@@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
536 if (skb_shinfo(skb)->gso_size == 0 && 536 if (skb_shinfo(skb)->gso_size == 0 &&
537 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && 537 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
538 skb_linearize(skb)) { 538 skb_linearize(skb)) {
539 dev_kfree_skb(skb); 539 dev_kfree_skb_any(skb);
540 return NETDEV_TX_OK; 540 return NETDEV_TX_OK;
541 } 541 }
542 542
@@ -1086,14 +1086,15 @@ static int enic_poll(struct napi_struct *napi, int budget)
1086 unsigned int intr = enic_legacy_io_intr(); 1086 unsigned int intr = enic_legacy_io_intr();
1087 unsigned int rq_work_to_do = budget; 1087 unsigned int rq_work_to_do = budget;
1088 unsigned int wq_work_to_do = -1; /* no limit */ 1088 unsigned int wq_work_to_do = -1; /* no limit */
1089 unsigned int work_done, rq_work_done, wq_work_done; 1089 unsigned int work_done, rq_work_done = 0, wq_work_done;
1090 int err; 1090 int err;
1091 1091
1092 /* Service RQ (first) and WQ 1092 /* Service RQ (first) and WQ
1093 */ 1093 */
1094 1094
1095 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], 1095 if (budget > 0)
1096 rq_work_to_do, enic_rq_service, NULL); 1096 rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
1097 rq_work_to_do, enic_rq_service, NULL);
1097 1098
1098 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], 1099 wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
1099 wq_work_to_do, enic_wq_service, NULL); 1100 wq_work_to_do, enic_wq_service, NULL);
@@ -1141,14 +1142,15 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1141 unsigned int cq = enic_cq_rq(enic, rq); 1142 unsigned int cq = enic_cq_rq(enic, rq);
1142 unsigned int intr = enic_msix_rq_intr(enic, rq); 1143 unsigned int intr = enic_msix_rq_intr(enic, rq);
1143 unsigned int work_to_do = budget; 1144 unsigned int work_to_do = budget;
1144 unsigned int work_done; 1145 unsigned int work_done = 0;
1145 int err; 1146 int err;
1146 1147
1147 /* Service RQ 1148 /* Service RQ
1148 */ 1149 */
1149 1150
1150 work_done = vnic_cq_service(&enic->cq[cq], 1151 if (budget > 0)
1151 work_to_do, enic_rq_service, NULL); 1152 work_done = vnic_cq_service(&enic->cq[cq],
1153 work_to_do, enic_rq_service, NULL);
1152 1154
1153 /* Return intr event credits for this polling 1155 /* Return intr event credits for this polling
1154 * cycle. An intr event is the completion of a 1156 * cycle. An intr event is the completion of a
@@ -1796,7 +1798,8 @@ static int enic_set_intr_mode(struct enic *enic)
1796 enic->cq_count >= n + m && 1798 enic->cq_count >= n + m &&
1797 enic->intr_count >= n + m + 2) { 1799 enic->intr_count >= n + m + 2) {
1798 1800
1799 if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { 1801 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1802 n + m + 2, n + m + 2) > 0) {
1800 1803
1801 enic->rq_count = n; 1804 enic->rq_count = n;
1802 enic->wq_count = m; 1805 enic->wq_count = m;
@@ -1815,7 +1818,8 @@ static int enic_set_intr_mode(struct enic *enic)
1815 enic->wq_count >= m && 1818 enic->wq_count >= m &&
1816 enic->cq_count >= 1 + m && 1819 enic->cq_count >= 1 + m &&
1817 enic->intr_count >= 1 + m + 2) { 1820 enic->intr_count >= 1 + m + 2) {
1818 if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) { 1821 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1822 1 + m + 2, 1 + m + 2) > 0) {
1819 1823
1820 enic->rq_count = 1; 1824 enic->rq_count = 1;
1821 enic->wq_count = m; 1825 enic->wq_count = m;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a1a2b4028a5c..8c4b93be333b 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1033 spin_unlock_irqrestore(&db->lock, flags); 1033 spin_unlock_irqrestore(&db->lock, flags);
1034 1034
1035 /* free this SKB */ 1035 /* free this SKB */
1036 dev_kfree_skb(skb); 1036 dev_consume_skb_any(skb);
1037 1037
1038 return NETDEV_TX_OK; 1038 return NETDEV_TX_OK;
1039} 1039}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 5ad9e3e3c0b8..53f0c618045c 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
696 /* Too large packet check */ 696 /* Too large packet check */
697 if (skb->len > MAX_PACKET_SIZE) { 697 if (skb->len > MAX_PACKET_SIZE) {
698 pr_err("big packet = %d\n", (u16)skb->len); 698 pr_err("big packet = %d\n", (u16)skb->len);
699 dev_kfree_skb(skb); 699 dev_kfree_skb_any(skb);
700 return NETDEV_TX_OK; 700 return NETDEV_TX_OK;
701 } 701 }
702 702
@@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
743 dw32(DCR7, db->cr7_data); 743 dw32(DCR7, db->cr7_data);
744 744
745 /* free this SKB */ 745 /* free this SKB */
746 dev_kfree_skb(skb); 746 dev_consume_skb_any(skb);
747 747
748 return NETDEV_TX_OK; 748 return NETDEV_TX_OK;
749} 749}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa4ee385091f..aa801a6af7b9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
607 /* Too large packet check */ 607 /* Too large packet check */
608 if (skb->len > MAX_PACKET_SIZE) { 608 if (skb->len > MAX_PACKET_SIZE) {
609 netdev_err(dev, "big packet = %d\n", (u16)skb->len); 609 netdev_err(dev, "big packet = %d\n", (u16)skb->len);
610 dev_kfree_skb(skb); 610 dev_kfree_skb_any(skb);
611 return NETDEV_TX_OK; 611 return NETDEV_TX_OK;
612 } 612 }
613 613
@@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
648 uw32(DCR7, db->cr7_data); 648 uw32(DCR7, db->cr7_data);
649 649
650 /* free this SKB */ 650 /* free this SKB */
651 dev_kfree_skb(skb); 651 dev_consume_skb_any(skb);
652 652
653 return NETDEV_TX_OK; 653 return NETDEV_TX_OK;
654} 654}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 113cd799a131..d9e5ca0d48c1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1137 return NETDEV_TX_OK; 1137 return NETDEV_TX_OK;
1138 1138
1139drop_frame: 1139drop_frame:
1140 dev_kfree_skb(skb); 1140 dev_kfree_skb_any(skb);
1141 np->tx_skbuff[entry] = NULL; 1141 np->tx_skbuff[entry] = NULL;
1142 dev->stats.tx_dropped++; 1142 dev->stats.tx_dropped++;
1143 return NETDEV_TX_OK; 1143 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 8a79a32a5674..e9b0faba3078 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -170,11 +170,6 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
170 return 0; 170 return 0;
171} 171}
172 172
173static int dnet_mdio_reset(struct mii_bus *bus)
174{
175 return 0;
176}
177
178static void dnet_handle_link_change(struct net_device *dev) 173static void dnet_handle_link_change(struct net_device *dev)
179{ 174{
180 struct dnet *bp = netdev_priv(dev); 175 struct dnet *bp = netdev_priv(dev);
@@ -322,7 +317,6 @@ static int dnet_mii_init(struct dnet *bp)
322 bp->mii_bus->name = "dnet_mii_bus"; 317 bp->mii_bus->name = "dnet_mii_bus";
323 bp->mii_bus->read = &dnet_mdio_read; 318 bp->mii_bus->read = &dnet_mdio_read;
324 bp->mii_bus->write = &dnet_mdio_write; 319 bp->mii_bus->write = &dnet_mdio_write;
325 bp->mii_bus->reset = &dnet_mdio_reset;
326 320
327 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 321 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
328 bp->pdev->name, bp->pdev->id); 322 bp->pdev->name, bp->pdev->id);
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 231129dd1764..ea94a8eb6b35 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -4,3 +4,11 @@ config BE2NET
4 ---help--- 4 ---help---
5 This driver implements the NIC functionality for ServerEngines' 5 This driver implements the NIC functionality for ServerEngines'
6 10Gbps network adapter - BladeEngine. 6 10Gbps network adapter - BladeEngine.
7
8config BE2NET_VXLAN
9 bool "VXLAN offload support on be2net driver"
10 default y
11 depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
12 ---help---
13 Say Y here if you want to enable VXLAN offload support on
14 be2net driver.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 05529e273050..8ccaa2520dc3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "10.0.600.0u" 37#define DRV_VER "10.2u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "Emulex BladeEngine2" 39#define BE_NAME "Emulex BladeEngine2"
40#define BE3_NAME "Emulex BladeEngine3" 40#define BE3_NAME "Emulex BladeEngine3"
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
92#define BE_MAX_EQD 128u 91#define BE_MAX_EQD 128u
93#define BE_MAX_TX_FRAG_COUNT 30 92#define BE_MAX_TX_FRAG_COUNT 30
94 93
@@ -262,9 +261,10 @@ struct be_tx_obj {
262/* Struct to remember the pages posted for rx frags */ 261/* Struct to remember the pages posted for rx frags */
263struct be_rx_page_info { 262struct be_rx_page_info {
264 struct page *page; 263 struct page *page;
264 /* set to page-addr for last frag of the page & frag-addr otherwise */
265 DEFINE_DMA_UNMAP_ADDR(bus); 265 DEFINE_DMA_UNMAP_ADDR(bus);
266 u16 page_offset; 266 u16 page_offset;
267 bool last_page_user; 267 bool last_frag; /* last frag of the page */
268}; 268};
269 269
270struct be_rx_stats { 270struct be_rx_stats {
@@ -293,9 +293,10 @@ struct be_rx_compl_info {
293 u8 ip_csum; 293 u8 ip_csum;
294 u8 l4_csum; 294 u8 l4_csum;
295 u8 ipv6; 295 u8 ipv6;
296 u8 vtm; 296 u8 qnq;
297 u8 pkt_type; 297 u8 pkt_type;
298 u8 ip_frag; 298 u8 ip_frag;
299 u8 tunneled;
299}; 300};
300 301
301struct be_rx_obj { 302struct be_rx_obj {
@@ -359,6 +360,7 @@ struct be_vf_cfg {
359 int pmac_id; 360 int pmac_id;
360 u16 vlan_tag; 361 u16 vlan_tag;
361 u32 tx_rate; 362 u32 tx_rate;
363 u32 plink_tracking;
362}; 364};
363 365
364enum vf_state { 366enum vf_state {
@@ -370,10 +372,11 @@ enum vf_state {
370#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 372#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
371#define BE_FLAGS_VLAN_PROMISC (1 << 4) 373#define BE_FLAGS_VLAN_PROMISC (1 << 4)
372#define BE_FLAGS_NAPI_ENABLED (1 << 9) 374#define BE_FLAGS_NAPI_ENABLED (1 << 9)
373#define BE_UC_PMAC_COUNT 30
374#define BE_VF_UC_PMAC_COUNT 2
375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
376 377
378#define BE_UC_PMAC_COUNT 30
379#define BE_VF_UC_PMAC_COUNT 2
377/* Ethtool set_dump flags */ 380/* Ethtool set_dump flags */
378#define LANCER_INITIATE_FW_DUMP 0x1 381#define LANCER_INITIATE_FW_DUMP 0x1
379 382
@@ -467,6 +470,7 @@ struct be_adapter {
467 470
468 u32 port_num; 471 u32 port_num;
469 bool promiscuous; 472 bool promiscuous;
473 u8 mc_type;
470 u32 function_mode; 474 u32 function_mode;
471 u32 function_caps; 475 u32 function_caps;
472 u32 rx_fc; /* Rx flow control */ 476 u32 rx_fc; /* Rx flow control */
@@ -492,6 +496,7 @@ struct be_adapter {
492 u32 sli_family; 496 u32 sli_family;
493 u8 hba_port_num; 497 u8 hba_port_num;
494 u16 pvid; 498 u16 pvid;
499 __be16 vxlan_port;
495 struct phy_info phy; 500 struct phy_info phy;
496 u8 wol_cap; 501 u8 wol_cap;
497 bool wol_en; 502 bool wol_en;
@@ -536,6 +541,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
536 return min_t(u16, num, num_online_cpus()); 541 return min_t(u16, num, num_online_cpus());
537} 542}
538 543
544/* Is BE in pvid_tagging mode */
545#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
546
547/* Is BE in QNQ multi-channel mode */
548#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
549 adapter->mc_type == vNIC1 || \
550 adapter->mc_type == UFP)
551
539#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ 552#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
540 adapter->pdev->device == OC_DEVICE_ID4) 553 adapter->pdev->device == OC_DEVICE_ID4)
541 554
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 48076a6370c3..d1ec15af0d24 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -202,8 +202,12 @@ static void be_async_link_state_process(struct be_adapter *adapter,
202 /* When link status changes, link speed must be re-queried from FW */ 202 /* When link status changes, link speed must be re-queried from FW */
203 adapter->phy.link_speed = -1; 203 adapter->phy.link_speed = -1;
204 204
205 /* Ignore physical link event */ 205 /* On BEx the FW does not send a separate link status
206 if (lancer_chip(adapter) && 206 * notification for physical and logical link.
207 * On other chips just process the logical link
208 * status notification
209 */
210 if (!BEx_chip(adapter) &&
207 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) 211 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
208 return; 212 return;
209 213
@@ -211,7 +215,8 @@ static void be_async_link_state_process(struct be_adapter *adapter,
211 * it may not be received in some cases. 215 * it may not be received in some cases.
212 */ 216 */
213 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 217 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
214 be_link_status_update(adapter, evt->port_link_status); 218 be_link_status_update(adapter,
219 evt->port_link_status & LINK_STATUS_MASK);
215} 220}
216 221
217/* Grp5 CoS Priority evt */ 222/* Grp5 CoS Priority evt */
@@ -239,10 +244,12 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
239static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 244static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
240 struct be_async_event_grp5_pvid_state *evt) 245 struct be_async_event_grp5_pvid_state *evt)
241{ 246{
242 if (evt->enabled) 247 if (evt->enabled) {
243 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 248 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
244 else 249 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
250 } else {
245 adapter->pvid = 0; 251 adapter->pvid = 0;
252 }
246} 253}
247 254
248static void be_async_grp5_evt_process(struct be_adapter *adapter, 255static void be_async_grp5_evt_process(struct be_adapter *adapter,
@@ -3296,6 +3303,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3296 return NULL; 3303 return NULL;
3297} 3304}
3298 3305
3306static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3307{
3308 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3309 int i;
3310
3311 for (i = 0; i < desc_count; i++) {
3312 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3313 return (struct be_port_res_desc *)hdr;
3314
3315 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3316 hdr = (void *)hdr + hdr->desc_len;
3317 }
3318 return NULL;
3319}
3320
3299static void be_copy_nic_desc(struct be_resources *res, 3321static void be_copy_nic_desc(struct be_resources *res,
3300 struct be_nic_res_desc *desc) 3322 struct be_nic_res_desc *desc)
3301{ 3323{
@@ -3439,6 +3461,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3439{ 3461{
3440 struct be_cmd_resp_get_profile_config *resp; 3462 struct be_cmd_resp_get_profile_config *resp;
3441 struct be_pcie_res_desc *pcie; 3463 struct be_pcie_res_desc *pcie;
3464 struct be_port_res_desc *port;
3442 struct be_nic_res_desc *nic; 3465 struct be_nic_res_desc *nic;
3443 struct be_queue_info *mccq = &adapter->mcc_obj.q; 3466 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3444 struct be_dma_mem cmd; 3467 struct be_dma_mem cmd;
@@ -3466,6 +3489,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3466 if (pcie) 3489 if (pcie)
3467 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3490 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3468 3491
3492 port = be_get_port_desc(resp->func_param, desc_count);
3493 if (port)
3494 adapter->mc_type = port->mc_type;
3495
3469 nic = be_get_nic_desc(resp->func_param, desc_count); 3496 nic = be_get_nic_desc(resp->func_param, desc_count);
3470 if (nic) 3497 if (nic)
3471 be_copy_nic_desc(res, nic); 3498 be_copy_nic_desc(res, nic);
@@ -3476,14 +3503,11 @@ err:
3476 return status; 3503 return status;
3477} 3504}
3478 3505
3479/* Currently only Lancer uses this command and it supports version 0 only 3506int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3480 * Uses sync mcc 3507 int size, u8 version, u8 domain)
3481 */
3482int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3483 u8 domain)
3484{ 3508{
3485 struct be_mcc_wrb *wrb;
3486 struct be_cmd_req_set_profile_config *req; 3509 struct be_cmd_req_set_profile_config *req;
3510 struct be_mcc_wrb *wrb;
3487 int status; 3511 int status;
3488 3512
3489 spin_lock_bh(&adapter->mcc_lock); 3513 spin_lock_bh(&adapter->mcc_lock);
@@ -3495,44 +3519,116 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3495 } 3519 }
3496 3520
3497 req = embedded_payload(wrb); 3521 req = embedded_payload(wrb);
3498
3499 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3522 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3500 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req), 3523 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3501 wrb, NULL); 3524 wrb, NULL);
3525 req->hdr.version = version;
3502 req->hdr.domain = domain; 3526 req->hdr.domain = domain;
3503 req->desc_count = cpu_to_le32(1); 3527 req->desc_count = cpu_to_le32(1);
3504 req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3528 memcpy(req->desc, desc, size);
3505 req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3529
3506 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3507 req->nic_desc.pf_num = adapter->pf_number;
3508 req->nic_desc.vf_num = domain;
3509
3510 /* Mark fields invalid */
3511 req->nic_desc.unicast_mac_count = 0xFFFF;
3512 req->nic_desc.mcc_count = 0xFFFF;
3513 req->nic_desc.vlan_count = 0xFFFF;
3514 req->nic_desc.mcast_mac_count = 0xFFFF;
3515 req->nic_desc.txq_count = 0xFFFF;
3516 req->nic_desc.rq_count = 0xFFFF;
3517 req->nic_desc.rssq_count = 0xFFFF;
3518 req->nic_desc.lro_count = 0xFFFF;
3519 req->nic_desc.cq_count = 0xFFFF;
3520 req->nic_desc.toe_conn_count = 0xFFFF;
3521 req->nic_desc.eq_count = 0xFFFF;
3522 req->nic_desc.link_param = 0xFF;
3523 req->nic_desc.bw_min = 0xFFFFFFFF;
3524 req->nic_desc.acpi_params = 0xFF;
3525 req->nic_desc.wol_param = 0x0F;
3526
3527 /* Change BW */
3528 req->nic_desc.bw_min = cpu_to_le32(bps);
3529 req->nic_desc.bw_max = cpu_to_le32(bps);
3530 status = be_mcc_notify_wait(adapter); 3530 status = be_mcc_notify_wait(adapter);
3531err: 3531err:
3532 spin_unlock_bh(&adapter->mcc_lock); 3532 spin_unlock_bh(&adapter->mcc_lock);
3533 return status; 3533 return status;
3534} 3534}
3535 3535
3536/* Mark all fields invalid */
3537void be_reset_nic_desc(struct be_nic_res_desc *nic)
3538{
3539 memset(nic, 0, sizeof(*nic));
3540 nic->unicast_mac_count = 0xFFFF;
3541 nic->mcc_count = 0xFFFF;
3542 nic->vlan_count = 0xFFFF;
3543 nic->mcast_mac_count = 0xFFFF;
3544 nic->txq_count = 0xFFFF;
3545 nic->rq_count = 0xFFFF;
3546 nic->rssq_count = 0xFFFF;
3547 nic->lro_count = 0xFFFF;
3548 nic->cq_count = 0xFFFF;
3549 nic->toe_conn_count = 0xFFFF;
3550 nic->eq_count = 0xFFFF;
3551 nic->link_param = 0xFF;
3552 nic->acpi_params = 0xFF;
3553 nic->wol_param = 0x0F;
3554 nic->bw_min = 0xFFFFFFFF;
3555 nic->bw_max = 0xFFFFFFFF;
3556}
3557
3558int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
3559{
3560 if (lancer_chip(adapter)) {
3561 struct be_nic_res_desc nic_desc;
3562
3563 be_reset_nic_desc(&nic_desc);
3564 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3565 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3566 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3567 (1 << NOSV_SHIFT);
3568 nic_desc.pf_num = adapter->pf_number;
3569 nic_desc.vf_num = domain;
3570 nic_desc.bw_max = cpu_to_le32(bps);
3571
3572 return be_cmd_set_profile_config(adapter, &nic_desc,
3573 RESOURCE_DESC_SIZE_V0,
3574 0, domain);
3575 } else {
3576 return be_cmd_set_qos(adapter, bps, domain);
3577 }
3578}
3579
3580int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3581{
3582 struct be_mcc_wrb *wrb;
3583 struct be_cmd_req_manage_iface_filters *req;
3584 int status;
3585
3586 if (iface == 0xFFFFFFFF)
3587 return -1;
3588
3589 spin_lock_bh(&adapter->mcc_lock);
3590
3591 wrb = wrb_from_mccq(adapter);
3592 if (!wrb) {
3593 status = -EBUSY;
3594 goto err;
3595 }
3596 req = embedded_payload(wrb);
3597
3598 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3599 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3600 wrb, NULL);
3601 req->op = op;
3602 req->target_iface_id = cpu_to_le32(iface);
3603
3604 status = be_mcc_notify_wait(adapter);
3605err:
3606 spin_unlock_bh(&adapter->mcc_lock);
3607 return status;
3608}
3609
3610int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3611{
3612 struct be_port_res_desc port_desc;
3613
3614 memset(&port_desc, 0, sizeof(port_desc));
3615 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3616 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3617 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3618 port_desc.link_num = adapter->hba_port_num;
3619 if (port) {
3620 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3621 (1 << RCVID_SHIFT);
3622 port_desc.nv_port = swab16(port);
3623 } else {
3624 port_desc.nv_flags = NV_TYPE_DISABLED;
3625 port_desc.nv_port = 0;
3626 }
3627
3628 return be_cmd_set_profile_config(adapter, &port_desc,
3629 RESOURCE_DESC_SIZE_V1, 1, 0);
3630}
3631
3536int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 3632int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3537 int vf_num) 3633 int vf_num)
3538{ 3634{
@@ -3723,6 +3819,45 @@ err:
3723 return status; 3819 return status;
3724} 3820}
3725 3821
3822int be_cmd_set_logical_link_config(struct be_adapter *adapter,
3823 int link_state, u8 domain)
3824{
3825 struct be_mcc_wrb *wrb;
3826 struct be_cmd_req_set_ll_link *req;
3827 int status;
3828
3829 if (BEx_chip(adapter) || lancer_chip(adapter))
3830 return 0;
3831
3832 spin_lock_bh(&adapter->mcc_lock);
3833
3834 wrb = wrb_from_mccq(adapter);
3835 if (!wrb) {
3836 status = -EBUSY;
3837 goto err;
3838 }
3839
3840 req = embedded_payload(wrb);
3841
3842 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3843 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
3844 sizeof(*req), wrb, NULL);
3845
3846 req->hdr.version = 1;
3847 req->hdr.domain = domain;
3848
3849 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
3850 req->link_config |= 1;
3851
3852 if (link_state == IFLA_VF_LINK_STATE_AUTO)
3853 req->link_config |= 1 << PLINK_TRACK_SHIFT;
3854
3855 status = be_mcc_notify_wait(adapter);
3856err:
3857 spin_unlock_bh(&adapter->mcc_lock);
3858 return status;
3859}
3860
3726int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3861int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3727 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3862 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3728{ 3863{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index fc4e076dc202..b60e4d53c1c9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
203#define OPCODE_COMMON_GET_BEACON_STATE 70 203#define OPCODE_COMMON_GET_BEACON_STATE 70
204#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 204#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
205#define OPCODE_COMMON_GET_PORT_NAME 77 205#define OPCODE_COMMON_GET_PORT_NAME 77
206#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80
206#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 207#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
207#define OPCODE_COMMON_SET_FN_PRIVILEGES 100 208#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
208#define OPCODE_COMMON_GET_PHY_DETAILS 102 209#define OPCODE_COMMON_GET_PHY_DETAILS 102
@@ -221,6 +222,7 @@ struct be_mcc_mailbox {
221#define OPCODE_COMMON_GET_FN_PRIVILEGES 170 222#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
222#define OPCODE_COMMON_READ_OBJECT 171 223#define OPCODE_COMMON_READ_OBJECT 171
223#define OPCODE_COMMON_WRITE_OBJECT 172 224#define OPCODE_COMMON_WRITE_OBJECT 172
225#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
224#define OPCODE_COMMON_GET_IFACE_LIST 194 226#define OPCODE_COMMON_GET_IFACE_LIST 194
225#define OPCODE_COMMON_ENABLE_DISABLE_VF 196 227#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
226 228
@@ -1098,14 +1100,6 @@ struct be_cmd_resp_query_fw_cfg {
1098 u32 function_caps; 1100 u32 function_caps;
1099}; 1101};
1100 1102
1101/* Is BE in a multi-channel mode */
1102static inline bool be_is_mc(struct be_adapter *adapter)
1103{
1104 return adapter->function_mode & FLEX10_MODE ||
1105 adapter->function_mode & VNIC_MODE ||
1106 adapter->function_mode & UMC_ENABLED;
1107}
1108
1109/******************** RSS Config ****************************************/ 1103/******************** RSS Config ****************************************/
1110/* RSS type Input parameters used to compute RX hash 1104/* RSS type Input parameters used to compute RX hash
1111 * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4 1105 * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1828,20 +1822,36 @@ struct be_cmd_req_set_ext_fat_caps {
1828#define NIC_RESOURCE_DESC_TYPE_V0 0x41 1822#define NIC_RESOURCE_DESC_TYPE_V0 0x41
1829#define PCIE_RESOURCE_DESC_TYPE_V1 0x50 1823#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
1830#define NIC_RESOURCE_DESC_TYPE_V1 0x51 1824#define NIC_RESOURCE_DESC_TYPE_V1 0x51
1825#define PORT_RESOURCE_DESC_TYPE_V1 0x55
1831#define MAX_RESOURCE_DESC 264 1826#define MAX_RESOURCE_DESC 264
1832 1827
1833/* QOS unit number */ 1828#define IMM_SHIFT 6 /* Immediate */
1834#define QUN 4 1829#define NOSV_SHIFT 7 /* No save */
1835/* Immediate */
1836#define IMM 6
1837/* No save */
1838#define NOSV 7
1839 1830
1840struct be_res_desc_hdr { 1831struct be_res_desc_hdr {
1841 u8 desc_type; 1832 u8 desc_type;
1842 u8 desc_len; 1833 u8 desc_len;
1843} __packed; 1834} __packed;
1844 1835
1836struct be_port_res_desc {
1837 struct be_res_desc_hdr hdr;
1838 u8 rsvd0;
1839 u8 flags;
1840 u8 link_num;
1841 u8 mc_type;
1842 u16 rsvd1;
1843
1844#define NV_TYPE_MASK 0x3 /* bits 0-1 */
1845#define NV_TYPE_DISABLED 1
1846#define NV_TYPE_VXLAN 3
1847#define SOCVID_SHIFT 2 /* Strip outer vlan */
1848#define RCVID_SHIFT 4 /* Report vlan */
1849 u8 nv_flags;
1850 u8 rsvd2;
1851 __le16 nv_port; /* vxlan/gre port */
1852 u32 rsvd3[19];
1853} __packed;
1854
1845struct be_pcie_res_desc { 1855struct be_pcie_res_desc {
1846 struct be_res_desc_hdr hdr; 1856 struct be_res_desc_hdr hdr;
1847 u8 rsvd0; 1857 u8 rsvd0;
@@ -1862,6 +1872,8 @@ struct be_pcie_res_desc {
1862struct be_nic_res_desc { 1872struct be_nic_res_desc {
1863 struct be_res_desc_hdr hdr; 1873 struct be_res_desc_hdr hdr;
1864 u8 rsvd1; 1874 u8 rsvd1;
1875
1876#define QUN_SHIFT 4 /* QoS is in absolute units */
1865 u8 flags; 1877 u8 flags;
1866 u8 vf_num; 1878 u8 vf_num;
1867 u8 rsvd2; 1879 u8 rsvd2;
@@ -1891,6 +1903,23 @@ struct be_nic_res_desc {
1891 u32 rsvd8[7]; 1903 u32 rsvd8[7];
1892} __packed; 1904} __packed;
1893 1905
1906/************ Multi-Channel type ***********/
1907enum mc_type {
1908 MC_NONE = 0x01,
1909 UMC = 0x02,
1910 FLEX10 = 0x03,
1911 vNIC1 = 0x04,
1912 nPAR = 0x05,
1913 UFP = 0x06,
1914 vNIC2 = 0x07
1915};
1916
1917/* Is BE in a multi-channel mode */
1918static inline bool be_is_mc(struct be_adapter *adapter)
1919{
1920 return adapter->mc_type > MC_NONE;
1921}
1922
1894struct be_cmd_req_get_func_config { 1923struct be_cmd_req_get_func_config {
1895 struct be_cmd_req_hdr hdr; 1924 struct be_cmd_req_hdr hdr;
1896}; 1925};
@@ -1919,7 +1948,7 @@ struct be_cmd_req_set_profile_config {
1919 struct be_cmd_req_hdr hdr; 1948 struct be_cmd_req_hdr hdr;
1920 u32 rsvd; 1949 u32 rsvd;
1921 u32 desc_count; 1950 u32 desc_count;
1922 struct be_nic_res_desc nic_desc; 1951 u8 desc[RESOURCE_DESC_SIZE_V1];
1923}; 1952};
1924 1953
1925struct be_cmd_resp_set_profile_config { 1954struct be_cmd_resp_set_profile_config {
@@ -1971,6 +2000,33 @@ struct be_cmd_resp_get_iface_list {
1971 struct be_if_desc if_desc; 2000 struct be_if_desc if_desc;
1972}; 2001};
1973 2002
2003/*************** Set logical link ********************/
2004#define PLINK_TRACK_SHIFT 8
2005struct be_cmd_req_set_ll_link {
2006 struct be_cmd_req_hdr hdr;
2007 u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
2008};
2009
2010/************** Manage IFACE Filters *******************/
2011#define OP_CONVERT_NORMAL_TO_TUNNEL 0
2012#define OP_CONVERT_TUNNEL_TO_NORMAL 1
2013
2014struct be_cmd_req_manage_iface_filters {
2015 struct be_cmd_req_hdr hdr;
2016 u8 op;
2017 u8 rsvd0;
2018 u8 flags;
2019 u8 rsvd1;
2020 u32 tunnel_iface_id;
2021 u32 target_iface_id;
2022 u8 mac[6];
2023 u16 vlan_tag;
2024 u32 tenant_id;
2025 u32 filter_id;
2026 u32 cap_flags;
2027 u32 cap_control_flags;
2028} __packed;
2029
1974int be_pci_fnum_get(struct be_adapter *adapter); 2030int be_pci_fnum_get(struct be_adapter *adapter);
1975int be_fw_wait_ready(struct be_adapter *adapter); 2031int be_fw_wait_ready(struct be_adapter *adapter);
1976int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 2032int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2045,7 +2101,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2045int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2101int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2046 u8 loopback_type, u8 enable); 2102 u8 loopback_type, u8 enable);
2047int be_cmd_get_phy_info(struct be_adapter *adapter); 2103int be_cmd_get_phy_info(struct be_adapter *adapter);
2048int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 2104int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
2049void be_detect_error(struct be_adapter *adapter); 2105void be_detect_error(struct be_adapter *adapter);
2050int be_cmd_get_die_temperature(struct be_adapter *adapter); 2106int be_cmd_get_die_temperature(struct be_adapter *adapter);
2051int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 2107int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
@@ -2086,9 +2142,14 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
2086 struct be_resources *res); 2142 struct be_resources *res);
2087int be_cmd_get_profile_config(struct be_adapter *adapter, 2143int be_cmd_get_profile_config(struct be_adapter *adapter,
2088 struct be_resources *res, u8 domain); 2144 struct be_resources *res, u8 domain);
2089int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); 2145int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
2146 int size, u8 version, u8 domain);
2090int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); 2147int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
2091int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 2148int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
2092 int vf_num); 2149 int vf_num);
2093int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); 2150int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
2094int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); 2151int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
2152int be_cmd_set_logical_link_config(struct be_adapter *adapter,
2153 int link_state, u8 domain);
2154int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
2155int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 05be0070f55f..15ba96cba65d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev,
357 struct be_rx_stats *stats = rx_stats(rxo); 357 struct be_rx_stats *stats = rx_stats(rxo);
358 358
359 do { 359 do {
360 start = u64_stats_fetch_begin_bh(&stats->sync); 360 start = u64_stats_fetch_begin_irq(&stats->sync);
361 data[base] = stats->rx_bytes; 361 data[base] = stats->rx_bytes;
362 data[base + 1] = stats->rx_pkts; 362 data[base + 1] = stats->rx_pkts;
363 } while (u64_stats_fetch_retry_bh(&stats->sync, start)); 363 } while (u64_stats_fetch_retry_irq(&stats->sync, start));
364 364
365 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { 365 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
366 p = (u8 *)stats + et_rx_stats[i].offset; 366 p = (u8 *)stats + et_rx_stats[i].offset;
@@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev,
373 struct be_tx_stats *stats = tx_stats(txo); 373 struct be_tx_stats *stats = tx_stats(txo);
374 374
375 do { 375 do {
376 start = u64_stats_fetch_begin_bh(&stats->sync_compl); 376 start = u64_stats_fetch_begin_irq(&stats->sync_compl);
377 data[base] = stats->tx_compl; 377 data[base] = stats->tx_compl;
378 } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); 378 } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
379 379
380 do { 380 do {
381 start = u64_stats_fetch_begin_bh(&stats->sync); 381 start = u64_stats_fetch_begin_irq(&stats->sync);
382 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { 382 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
383 p = (u8 *)stats + et_tx_stats[i].offset; 383 p = (u8 *)stats + et_tx_stats[i].offset;
384 data[base + i] = 384 data[base + i] =
385 (et_tx_stats[i].size == sizeof(u64)) ? 385 (et_tx_stats[i].size == sizeof(u64)) ?
386 *(u64 *)p : *(u32 *)p; 386 *(u64 *)p : *(u32 *)p;
387 } 387 }
388 } while (u64_stats_fetch_retry_bh(&stats->sync, start)); 388 } while (u64_stats_fetch_retry_irq(&stats->sync, start));
389 base += ETHTOOL_TXSTATS_NUM; 389 base += ETHTOOL_TXSTATS_NUM;
390 } 390 }
391} 391}
@@ -802,16 +802,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
802 802
803 if (test->flags & ETH_TEST_FL_OFFLINE) { 803 if (test->flags & ETH_TEST_FL_OFFLINE) {
804 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, 804 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
805 &data[0]) != 0) { 805 &data[0]) != 0)
806 test->flags |= ETH_TEST_FL_FAILED; 806 test->flags |= ETH_TEST_FL_FAILED;
807 } 807
808 if (be_loopback_test(adapter, BE_PHY_LOOPBACK, 808 if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
809 &data[1]) != 0) { 809 &data[1]) != 0)
810 test->flags |= ETH_TEST_FL_FAILED;
811 }
812 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
813 &data[2]) != 0) {
814 test->flags |= ETH_TEST_FL_FAILED; 810 test->flags |= ETH_TEST_FL_FAILED;
811
812 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
813 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
814 &data[2]) != 0)
815 test->flags |= ETH_TEST_FL_FAILED;
816 test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
815 } 817 }
816 } 818 }
817 819
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index dc88782185f2..3bd198550edb 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
368 u8 numfrags[3]; /* dword 1 */ 368 u8 numfrags[3]; /* dword 1 */
369 u8 rss_flush; /* dword 2 */ 369 u8 rss_flush; /* dword 2 */
370 u8 cast_enc[2]; /* dword 2 */ 370 u8 cast_enc[2]; /* dword 2 */
371 u8 vtm; /* dword 2 */ 371 u8 qnq; /* dword 2 */
372 u8 rss_bank; /* dword 2 */ 372 u8 rss_bank; /* dword 2 */
373 u8 rsvd1[23]; /* dword 2 */ 373 u8 rsvd1[23]; /* dword 2 */
374 u8 lro_pkt; /* dword 2 */ 374 u8 lro_pkt; /* dword 2 */
@@ -401,13 +401,14 @@ struct amap_eth_rx_compl_v1 {
401 u8 numfrags[3]; /* dword 1 */ 401 u8 numfrags[3]; /* dword 1 */
402 u8 rss_flush; /* dword 2 */ 402 u8 rss_flush; /* dword 2 */
403 u8 cast_enc[2]; /* dword 2 */ 403 u8 cast_enc[2]; /* dword 2 */
404 u8 vtm; /* dword 2 */ 404 u8 qnq; /* dword 2 */
405 u8 rss_bank; /* dword 2 */ 405 u8 rss_bank; /* dword 2 */
406 u8 port[2]; /* dword 2 */ 406 u8 port[2]; /* dword 2 */
407 u8 vntagp; /* dword 2 */ 407 u8 vntagp; /* dword 2 */
408 u8 header_len[8]; /* dword 2 */ 408 u8 header_len[8]; /* dword 2 */
409 u8 header_split[2]; /* dword 2 */ 409 u8 header_split[2]; /* dword 2 */
410 u8 rsvd1[13]; /* dword 2 */ 410 u8 rsvd1[12]; /* dword 2 */
411 u8 tunneled;
411 u8 valid; /* dword 2 */ 412 u8 valid; /* dword 2 */
412 u8 rsshash[32]; /* dword 3 */ 413 u8 rsshash[32]; /* dword 3 */
413} __packed; 414} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 36c80612e21a..3e6df47b6973 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
23#include <linux/aer.h> 23#include <linux/aer.h>
24#include <linux/if_bridge.h> 24#include <linux/if_bridge.h>
25#include <net/busy_poll.h> 25#include <net/busy_poll.h>
26#include <net/vxlan.h>
26 27
27MODULE_VERSION(DRV_VER); 28MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids); 29MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -591,10 +592,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
591 for_all_rx_queues(adapter, rxo, i) { 592 for_all_rx_queues(adapter, rxo, i) {
592 const struct be_rx_stats *rx_stats = rx_stats(rxo); 593 const struct be_rx_stats *rx_stats = rx_stats(rxo);
593 do { 594 do {
594 start = u64_stats_fetch_begin_bh(&rx_stats->sync); 595 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
595 pkts = rx_stats(rxo)->rx_pkts; 596 pkts = rx_stats(rxo)->rx_pkts;
596 bytes = rx_stats(rxo)->rx_bytes; 597 bytes = rx_stats(rxo)->rx_bytes;
597 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start)); 598 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
598 stats->rx_packets += pkts; 599 stats->rx_packets += pkts;
599 stats->rx_bytes += bytes; 600 stats->rx_bytes += bytes;
600 stats->multicast += rx_stats(rxo)->rx_mcast_pkts; 601 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -605,10 +606,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
605 for_all_tx_queues(adapter, txo, i) { 606 for_all_tx_queues(adapter, txo, i) {
606 const struct be_tx_stats *tx_stats = tx_stats(txo); 607 const struct be_tx_stats *tx_stats = tx_stats(txo);
607 do { 608 do {
608 start = u64_stats_fetch_begin_bh(&tx_stats->sync); 609 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
609 pkts = tx_stats(txo)->tx_pkts; 610 pkts = tx_stats(txo)->tx_pkts;
610 bytes = tx_stats(txo)->tx_bytes; 611 bytes = tx_stats(txo)->tx_bytes;
611 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start)); 612 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
612 stats->tx_packets += pkts; 613 stats->tx_packets += pkts;
613 stats->tx_bytes += bytes; 614 stats->tx_bytes += bytes;
614 } 615 }
@@ -652,7 +653,7 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT; 653 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
653 } 654 }
654 655
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP) 656 if (link_status)
656 netif_carrier_on(netdev); 657 netif_carrier_on(netdev);
657 else 658 else
658 netif_carrier_off(netdev); 659 netif_carrier_off(netdev);
@@ -718,10 +719,23 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
718 return vlan_tag; 719 return vlan_tag;
719} 720}
720 721
722/* Used only for IP tunnel packets */
723static u16 skb_inner_ip_proto(struct sk_buff *skb)
724{
725 return (inner_ip_hdr(skb)->version == 4) ?
726 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
727}
728
729static u16 skb_ip_proto(struct sk_buff *skb)
730{
731 return (ip_hdr(skb)->version == 4) ?
732 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
733}
734
721static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 735static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
722 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) 736 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
723{ 737{
724 u16 vlan_tag; 738 u16 vlan_tag, proto;
725 739
726 memset(hdr, 0, sizeof(*hdr)); 740 memset(hdr, 0, sizeof(*hdr));
727 741
@@ -734,9 +748,15 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
734 if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) 748 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
735 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); 749 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
736 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 750 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
737 if (is_tcp_pkt(skb)) 751 if (skb->encapsulation) {
752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
753 proto = skb_inner_ip_proto(skb);
754 } else {
755 proto = skb_ip_proto(skb);
756 }
757 if (proto == IPPROTO_TCP)
738 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 758 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
739 else if (is_udp_pkt(skb)) 759 else if (proto == IPPROTO_UDP)
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); 760 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
741 } 761 }
742 762
@@ -935,9 +955,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
935 } 955 }
936 956
937 /* If vlan tag is already inlined in the packet, skip HW VLAN 957 /* If vlan tag is already inlined in the packet, skip HW VLAN
938 * tagging in UMC mode 958 * tagging in pvid-tagging mode
939 */ 959 */
940 if ((adapter->function_mode & UMC_ENABLED) && 960 if (be_pvid_tagging_enabled(adapter) &&
941 veh->h_vlan_proto == htons(ETH_P_8021Q)) 961 veh->h_vlan_proto == htons(ETH_P_8021Q))
942 *skip_hw_vlan = true; 962 *skip_hw_vlan = true;
943 963
@@ -1138,7 +1158,10 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1138 1158
1139 /* Packets with VID 0 are always received by Lancer by default */ 1159 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0) 1160 if (lancer_chip(adapter) && vid == 0)
1141 goto ret; 1161 return status;
1162
1163 if (adapter->vlan_tag[vid])
1164 return status;
1142 1165
1143 adapter->vlan_tag[vid] = 1; 1166 adapter->vlan_tag[vid] = 1;
1144 adapter->vlans_added++; 1167 adapter->vlans_added++;
@@ -1148,7 +1171,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1148 adapter->vlans_added--; 1171 adapter->vlans_added--;
1149 adapter->vlan_tag[vid] = 0; 1172 adapter->vlan_tag[vid] = 0;
1150 } 1173 }
1151ret: 1174
1152 return status; 1175 return status;
1153} 1176}
1154 1177
@@ -1288,6 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1288 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; 1311 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1289 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; 1312 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1290 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1313 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1314 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1291 1315
1292 return 0; 1316 return 0;
1293} 1317}
@@ -1342,11 +1366,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1342 return -EINVAL; 1366 return -EINVAL;
1343 } 1367 }
1344 1368
1345 if (lancer_chip(adapter)) 1369 status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
1346 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1347 else
1348 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1349
1350 if (status) 1370 if (status)
1351 dev_err(&adapter->pdev->dev, 1371 dev_err(&adapter->pdev->dev,
1352 "tx rate %d on VF %d failed\n", rate, vf); 1372 "tx rate %d on VF %d failed\n", rate, vf);
@@ -1354,6 +1374,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1354 adapter->vf_cfg[vf].tx_rate = rate; 1374 adapter->vf_cfg[vf].tx_rate = rate;
1355 return status; 1375 return status;
1356} 1376}
1377static int be_set_vf_link_state(struct net_device *netdev, int vf,
1378 int link_state)
1379{
1380 struct be_adapter *adapter = netdev_priv(netdev);
1381 int status;
1382
1383 if (!sriov_enabled(adapter))
1384 return -EPERM;
1385
1386 if (vf >= adapter->num_vfs)
1387 return -EINVAL;
1388
1389 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1390 if (!status)
1391 adapter->vf_cfg[vf].plink_tracking = link_state;
1392
1393 return status;
1394}
1357 1395
1358static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts, 1396static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1359 ulong now) 1397 ulong now)
@@ -1386,15 +1424,15 @@ static void be_eqd_update(struct be_adapter *adapter)
1386 1424
1387 rxo = &adapter->rx_obj[eqo->idx]; 1425 rxo = &adapter->rx_obj[eqo->idx];
1388 do { 1426 do {
1389 start = u64_stats_fetch_begin_bh(&rxo->stats.sync); 1427 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1390 rx_pkts = rxo->stats.rx_pkts; 1428 rx_pkts = rxo->stats.rx_pkts;
1391 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start)); 1429 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1392 1430
1393 txo = &adapter->tx_obj[eqo->idx]; 1431 txo = &adapter->tx_obj[eqo->idx];
1394 do { 1432 do {
1395 start = u64_stats_fetch_begin_bh(&txo->stats.sync); 1433 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1396 tx_pkts = txo->stats.tx_reqs; 1434 tx_pkts = txo->stats.tx_reqs;
1397 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start)); 1435 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1398 1436
1399 1437
1400 /* Skip, if wrapped around or first calculation */ 1438 /* Skip, if wrapped around or first calculation */
@@ -1449,9 +1487,10 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
1449static inline bool csum_passed(struct be_rx_compl_info *rxcp) 1487static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1450{ 1488{
1451 /* L4 checksum is not reliable for non TCP/UDP packets. 1489 /* L4 checksum is not reliable for non TCP/UDP packets.
1452 * Also ignore ipcksm for ipv6 pkts */ 1490 * Also ignore ipcksm for ipv6 pkts
1491 */
1453 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum && 1492 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1454 (rxcp->ip_csum || rxcp->ipv6); 1493 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1455} 1494}
1456 1495
1457static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) 1496static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
@@ -1464,11 +1503,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1464 rx_page_info = &rxo->page_info_tbl[frag_idx]; 1503 rx_page_info = &rxo->page_info_tbl[frag_idx];
1465 BUG_ON(!rx_page_info->page); 1504 BUG_ON(!rx_page_info->page);
1466 1505
1467 if (rx_page_info->last_page_user) { 1506 if (rx_page_info->last_frag) {
1468 dma_unmap_page(&adapter->pdev->dev, 1507 dma_unmap_page(&adapter->pdev->dev,
1469 dma_unmap_addr(rx_page_info, bus), 1508 dma_unmap_addr(rx_page_info, bus),
1470 adapter->big_page_size, DMA_FROM_DEVICE); 1509 adapter->big_page_size, DMA_FROM_DEVICE);
1471 rx_page_info->last_page_user = false; 1510 rx_page_info->last_frag = false;
1511 } else {
1512 dma_sync_single_for_cpu(&adapter->pdev->dev,
1513 dma_unmap_addr(rx_page_info, bus),
1514 rx_frag_size, DMA_FROM_DEVICE);
1472 } 1515 }
1473 1516
1474 queue_tail_inc(rxq); 1517 queue_tail_inc(rxq);
@@ -1590,6 +1633,8 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1590 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); 1633 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1591 if (netdev->features & NETIF_F_RXHASH) 1634 if (netdev->features & NETIF_F_RXHASH)
1592 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); 1635 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1636
1637 skb->encapsulation = rxcp->tunneled;
1593 skb_mark_napi_id(skb, napi); 1638 skb_mark_napi_id(skb, napi);
1594 1639
1595 if (rxcp->vlanf) 1640 if (rxcp->vlanf)
@@ -1646,6 +1691,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1646 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); 1691 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1647 if (adapter->netdev->features & NETIF_F_RXHASH) 1692 if (adapter->netdev->features & NETIF_F_RXHASH)
1648 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); 1693 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1694
1695 skb->encapsulation = rxcp->tunneled;
1649 skb_mark_napi_id(skb, napi); 1696 skb_mark_napi_id(skb, napi);
1650 1697
1651 if (rxcp->vlanf) 1698 if (rxcp->vlanf)
@@ -1676,12 +1723,14 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1676 rxcp->rss_hash = 1723 rxcp->rss_hash =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl); 1724 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1678 if (rxcp->vlanf) { 1725 if (rxcp->vlanf) {
1679 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, 1726 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1680 compl); 1727 compl);
1681 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1728 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1682 compl); 1729 compl);
1683 } 1730 }
1684 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); 1731 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1732 rxcp->tunneled =
1733 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
1685} 1734}
1686 1735
1687static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, 1736static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
@@ -1706,7 +1755,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1706 rxcp->rss_hash = 1755 rxcp->rss_hash =
1707 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl); 1756 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1708 if (rxcp->vlanf) { 1757 if (rxcp->vlanf) {
1709 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, 1758 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1710 compl); 1759 compl);
1711 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1760 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1712 compl); 1761 compl);
@@ -1739,9 +1788,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1739 rxcp->l4_csum = 0; 1788 rxcp->l4_csum = 0;
1740 1789
1741 if (rxcp->vlanf) { 1790 if (rxcp->vlanf) {
1742 /* vlanf could be wrongly set in some cards. 1791 /* In QNQ modes, if qnq bit is not set, then the packet was
1743 * ignore if vtm is not set */ 1792 * tagged only with the transparent outer vlan-tag and must
1744 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm) 1793 * not be treated as a vlan packet by host
1794 */
1795 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1745 rxcp->vlanf = 0; 1796 rxcp->vlanf = 0;
1746 1797
1747 if (!lancer_chip(adapter)) 1798 if (!lancer_chip(adapter))
@@ -1800,17 +1851,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1800 rx_stats(rxo)->rx_post_fail++; 1851 rx_stats(rxo)->rx_post_fail++;
1801 break; 1852 break;
1802 } 1853 }
1803 page_info->page_offset = 0; 1854 page_offset = 0;
1804 } else { 1855 } else {
1805 get_page(pagep); 1856 get_page(pagep);
1806 page_info->page_offset = page_offset + rx_frag_size; 1857 page_offset += rx_frag_size;
1807 } 1858 }
1808 page_offset = page_info->page_offset; 1859 page_info->page_offset = page_offset;
1809 page_info->page = pagep; 1860 page_info->page = pagep;
1810 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1811 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1812 1861
1813 rxd = queue_head_node(rxq); 1862 rxd = queue_head_node(rxq);
1863 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1814 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); 1864 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1815 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); 1865 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1816 1866
@@ -1818,15 +1868,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1818 if ((page_offset + rx_frag_size + rx_frag_size) > 1868 if ((page_offset + rx_frag_size + rx_frag_size) >
1819 adapter->big_page_size) { 1869 adapter->big_page_size) {
1820 pagep = NULL; 1870 pagep = NULL;
1821 page_info->last_page_user = true; 1871 page_info->last_frag = true;
1872 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1873 } else {
1874 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1822 } 1875 }
1823 1876
1824 prev_page_info = page_info; 1877 prev_page_info = page_info;
1825 queue_head_inc(rxq); 1878 queue_head_inc(rxq);
1826 page_info = &rxo->page_info_tbl[rxq->head]; 1879 page_info = &rxo->page_info_tbl[rxq->head];
1827 } 1880 }
1828 if (pagep) 1881
1829 prev_page_info->last_page_user = true; 1882 /* Mark the last frag of a page when we break out of the above loop
1883 * with no more slots available in the RXQ
1884 */
1885 if (pagep) {
1886 prev_page_info->last_frag = true;
1887 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1888 }
1830 1889
1831 if (posted) { 1890 if (posted) {
1832 atomic_add(posted, &rxq->used); 1891 atomic_add(posted, &rxq->used);
@@ -1883,7 +1942,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
1883 queue_tail_inc(txq); 1942 queue_tail_inc(txq);
1884 } while (cur_index != last_index); 1943 } while (cur_index != last_index);
1885 1944
1886 kfree_skb(sent_skb); 1945 dev_kfree_skb_any(sent_skb);
1887 return num_wrbs; 1946 return num_wrbs;
1888} 1947}
1889 1948
@@ -2439,6 +2498,9 @@ void be_detect_error(struct be_adapter *adapter)
2439 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; 2498 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2440 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 2499 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2441 u32 i; 2500 u32 i;
2501 bool error_detected = false;
2502 struct device *dev = &adapter->pdev->dev;
2503 struct net_device *netdev = adapter->netdev;
2442 2504
2443 if (be_hw_error(adapter)) 2505 if (be_hw_error(adapter))
2444 return; 2506 return;
@@ -2450,6 +2512,21 @@ void be_detect_error(struct be_adapter *adapter)
2450 SLIPORT_ERROR1_OFFSET); 2512 SLIPORT_ERROR1_OFFSET);
2451 sliport_err2 = ioread32(adapter->db + 2513 sliport_err2 = ioread32(adapter->db +
2452 SLIPORT_ERROR2_OFFSET); 2514 SLIPORT_ERROR2_OFFSET);
2515 adapter->hw_error = true;
2516 /* Do not log error messages if its a FW reset */
2517 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2518 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2519 dev_info(dev, "Firmware update in progress\n");
2520 } else {
2521 error_detected = true;
2522 dev_err(dev, "Error detected in the card\n");
2523 dev_err(dev, "ERR: sliport status 0x%x\n",
2524 sliport_status);
2525 dev_err(dev, "ERR: sliport error1 0x%x\n",
2526 sliport_err1);
2527 dev_err(dev, "ERR: sliport error2 0x%x\n",
2528 sliport_err2);
2529 }
2453 } 2530 }
2454 } else { 2531 } else {
2455 pci_read_config_dword(adapter->pdev, 2532 pci_read_config_dword(adapter->pdev,
@@ -2463,51 +2540,33 @@ void be_detect_error(struct be_adapter *adapter)
2463 2540
2464 ue_lo = (ue_lo & ~ue_lo_mask); 2541 ue_lo = (ue_lo & ~ue_lo_mask);
2465 ue_hi = (ue_hi & ~ue_hi_mask); 2542 ue_hi = (ue_hi & ~ue_hi_mask);
2466 }
2467
2468 /* On certain platforms BE hardware can indicate spurious UEs.
2469 * Allow the h/w to stop working completely in case of a real UE.
2470 * Hence not setting the hw_error for UE detection.
2471 */
2472 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2473 adapter->hw_error = true;
2474 /* Do not log error messages if its a FW reset */
2475 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2476 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2477 dev_info(&adapter->pdev->dev,
2478 "Firmware update in progress\n");
2479 return;
2480 } else {
2481 dev_err(&adapter->pdev->dev,
2482 "Error detected in the card\n");
2483 }
2484 }
2485
2486 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2487 dev_err(&adapter->pdev->dev,
2488 "ERR: sliport status 0x%x\n", sliport_status);
2489 dev_err(&adapter->pdev->dev,
2490 "ERR: sliport error1 0x%x\n", sliport_err1);
2491 dev_err(&adapter->pdev->dev,
2492 "ERR: sliport error2 0x%x\n", sliport_err2);
2493 }
2494 2543
2495 if (ue_lo) { 2544 /* On certain platforms BE hardware can indicate spurious UEs.
2496 for (i = 0; ue_lo; ue_lo >>= 1, i++) { 2545 * Allow HW to stop working completely in case of a real UE.
2497 if (ue_lo & 1) 2546 * Hence not setting the hw_error for UE detection.
2498 dev_err(&adapter->pdev->dev, 2547 */
2499 "UE: %s bit set\n", ue_status_low_desc[i]);
2500 }
2501 }
2502 2548
2503 if (ue_hi) { 2549 if (ue_lo || ue_hi) {
2504 for (i = 0; ue_hi; ue_hi >>= 1, i++) { 2550 error_detected = true;
2505 if (ue_hi & 1) 2551 dev_err(dev,
2506 dev_err(&adapter->pdev->dev, 2552 "Unrecoverable Error detected in the adapter");
2507 "UE: %s bit set\n", ue_status_hi_desc[i]); 2553 dev_err(dev, "Please reboot server to recover");
2554 if (skyhawk_chip(adapter))
2555 adapter->hw_error = true;
2556 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2557 if (ue_lo & 1)
2558 dev_err(dev, "UE: %s bit set\n",
2559 ue_status_low_desc[i]);
2560 }
2561 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2562 if (ue_hi & 1)
2563 dev_err(dev, "UE: %s bit set\n",
2564 ue_status_hi_desc[i]);
2565 }
2508 } 2566 }
2509 } 2567 }
2510 2568 if (error_detected)
2569 netif_carrier_off(netdev);
2511} 2570}
2512 2571
2513static void be_msix_disable(struct be_adapter *adapter) 2572static void be_msix_disable(struct be_adapter *adapter)
@@ -2521,7 +2580,7 @@ static void be_msix_disable(struct be_adapter *adapter)
2521 2580
2522static int be_msix_enable(struct be_adapter *adapter) 2581static int be_msix_enable(struct be_adapter *adapter)
2523{ 2582{
2524 int i, status, num_vec; 2583 int i, num_vec;
2525 struct device *dev = &adapter->pdev->dev; 2584 struct device *dev = &adapter->pdev->dev;
2526 2585
2527 /* If RoCE is supported, program the max number of NIC vectors that 2586 /* If RoCE is supported, program the max number of NIC vectors that
@@ -2537,24 +2596,11 @@ static int be_msix_enable(struct be_adapter *adapter)
2537 for (i = 0; i < num_vec; i++) 2596 for (i = 0; i < num_vec; i++)
2538 adapter->msix_entries[i].entry = i; 2597 adapter->msix_entries[i].entry = i;
2539 2598
2540 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); 2599 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2541 if (status == 0) { 2600 MIN_MSIX_VECTORS, num_vec);
2542 goto done; 2601 if (num_vec < 0)
2543 } else if (status >= MIN_MSIX_VECTORS) { 2602 goto fail;
2544 num_vec = status;
2545 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2546 num_vec);
2547 if (!status)
2548 goto done;
2549 }
2550
2551 dev_warn(dev, "MSIx enable failed\n");
2552 2603
2553 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2554 if (!be_physfn(adapter))
2555 return status;
2556 return 0;
2557done:
2558 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) { 2604 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2559 adapter->num_msix_roce_vec = num_vec / 2; 2605 adapter->num_msix_roce_vec = num_vec / 2;
2560 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n", 2606 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2566,6 +2612,14 @@ done:
2566 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n", 2612 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2567 adapter->num_msix_vec); 2613 adapter->num_msix_vec);
2568 return 0; 2614 return 0;
2615
2616fail:
2617 dev_warn(dev, "MSIx enable failed\n");
2618
2619 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2620 if (!be_physfn(adapter))
2621 return num_vec;
2622 return 0;
2569} 2623}
2570 2624
2571static inline int be_msix_vec_get(struct be_adapter *adapter, 2625static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2807,6 +2861,12 @@ static int be_open(struct net_device *netdev)
2807 2861
2808 netif_tx_start_all_queues(netdev); 2862 netif_tx_start_all_queues(netdev);
2809 be_roce_dev_open(adapter); 2863 be_roce_dev_open(adapter);
2864
2865#ifdef CONFIG_BE2NET_VXLAN
2866 if (skyhawk_chip(adapter))
2867 vxlan_get_rx_port(netdev);
2868#endif
2869
2810 return 0; 2870 return 0;
2811err: 2871err:
2812 be_close(adapter->netdev); 2872 be_close(adapter->netdev);
@@ -2962,6 +3022,21 @@ static void be_mac_clear(struct be_adapter *adapter)
2962 } 3022 }
2963} 3023}
2964 3024
3025#ifdef CONFIG_BE2NET_VXLAN
3026static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3027{
3028 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3029 be_cmd_manage_iface(adapter, adapter->if_handle,
3030 OP_CONVERT_TUNNEL_TO_NORMAL);
3031
3032 if (adapter->vxlan_port)
3033 be_cmd_set_vxlan_port(adapter, 0);
3034
3035 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3036 adapter->vxlan_port = 0;
3037}
3038#endif
3039
2965static int be_clear(struct be_adapter *adapter) 3040static int be_clear(struct be_adapter *adapter)
2966{ 3041{
2967 be_cancel_worker(adapter); 3042 be_cancel_worker(adapter);
@@ -2969,6 +3044,9 @@ static int be_clear(struct be_adapter *adapter)
2969 if (sriov_enabled(adapter)) 3044 if (sriov_enabled(adapter))
2970 be_vf_clear(adapter); 3045 be_vf_clear(adapter);
2971 3046
3047#ifdef CONFIG_BE2NET_VXLAN
3048 be_disable_vxlan_offloads(adapter);
3049#endif
2972 /* delete the primary mac along with the uc-mac list */ 3050 /* delete the primary mac along with the uc-mac list */
2973 be_mac_clear(adapter); 3051 be_mac_clear(adapter);
2974 3052
@@ -3093,15 +3171,19 @@ static int be_vf_setup(struct be_adapter *adapter)
3093 * Allow full available bandwidth 3171 * Allow full available bandwidth
3094 */ 3172 */
3095 if (BE3_chip(adapter) && !old_vfs) 3173 if (BE3_chip(adapter) && !old_vfs)
3096 be_cmd_set_qos(adapter, 1000, vf+1); 3174 be_cmd_config_qos(adapter, 1000, vf + 1);
3097 3175
3098 status = be_cmd_link_status_query(adapter, &lnk_speed, 3176 status = be_cmd_link_status_query(adapter, &lnk_speed,
3099 NULL, vf + 1); 3177 NULL, vf + 1);
3100 if (!status) 3178 if (!status)
3101 vf_cfg->tx_rate = lnk_speed; 3179 vf_cfg->tx_rate = lnk_speed;
3102 3180
3103 if (!old_vfs) 3181 if (!old_vfs) {
3104 be_cmd_enable_vf(adapter, vf + 1); 3182 be_cmd_enable_vf(adapter, vf + 1);
3183 be_cmd_set_logical_link_config(adapter,
3184 IFLA_VF_LINK_STATE_AUTO,
3185 vf+1);
3186 }
3105 } 3187 }
3106 3188
3107 if (!old_vfs) { 3189 if (!old_vfs) {
@@ -3119,19 +3201,38 @@ err:
3119 return status; 3201 return status;
3120} 3202}
3121 3203
3204/* Converting function_mode bits on BE3 to SH mc_type enums */
3205
3206static u8 be_convert_mc_type(u32 function_mode)
3207{
3208 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3209 return vNIC1;
3210 else if (function_mode & FLEX10_MODE)
3211 return FLEX10;
3212 else if (function_mode & VNIC_MODE)
3213 return vNIC2;
3214 else if (function_mode & UMC_ENABLED)
3215 return UMC;
3216 else
3217 return MC_NONE;
3218}
3219
3122/* On BE2/BE3 FW does not suggest the supported limits */ 3220/* On BE2/BE3 FW does not suggest the supported limits */
3123static void BEx_get_resources(struct be_adapter *adapter, 3221static void BEx_get_resources(struct be_adapter *adapter,
3124 struct be_resources *res) 3222 struct be_resources *res)
3125{ 3223{
3126 struct pci_dev *pdev = adapter->pdev; 3224 struct pci_dev *pdev = adapter->pdev;
3127 bool use_sriov = false; 3225 bool use_sriov = false;
3128 int max_vfs; 3226 int max_vfs = 0;
3129 3227
3130 max_vfs = pci_sriov_get_totalvfs(pdev); 3228 if (be_physfn(adapter) && BE3_chip(adapter)) {
3131 3229 be_cmd_get_profile_config(adapter, res, 0);
3132 if (BE3_chip(adapter) && sriov_want(adapter)) { 3230 /* Some old versions of BE3 FW don't report max_vfs value */
3133 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3231 if (res->max_vfs == 0) {
3134 use_sriov = res->max_vfs; 3232 max_vfs = pci_sriov_get_totalvfs(pdev);
3233 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3234 }
3235 use_sriov = res->max_vfs && sriov_want(adapter);
3135 } 3236 }
3136 3237
3137 if (be_physfn(adapter)) 3238 if (be_physfn(adapter))
@@ -3139,17 +3240,32 @@ static void BEx_get_resources(struct be_adapter *adapter,
3139 else 3240 else
3140 res->max_uc_mac = BE_VF_UC_PMAC_COUNT; 3241 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3141 3242
3142 if (adapter->function_mode & FLEX10_MODE) 3243 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3143 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 3244
3144 else if (adapter->function_mode & UMC_ENABLED) 3245 if (be_is_mc(adapter)) {
3145 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED; 3246 /* Assuming that there are 4 channels per port,
3146 else 3247 * when multi-channel is enabled
3248 */
3249 if (be_is_qnq_mode(adapter))
3250 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3251 else
3252 /* In a non-qnq multichannel mode, the pvid
3253 * takes up one vlan entry
3254 */
3255 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3256 } else {
3147 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 3257 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3258 }
3259
3148 res->max_mcast_mac = BE_MAX_MC; 3260 res->max_mcast_mac = BE_MAX_MC;
3149 3261
3150 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */ 3262 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3151 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || 3263 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3152 !be_physfn(adapter) || (adapter->port_num > 1)) 3264 * *only* if it is RSS-capable.
3265 */
3266 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3267 !be_physfn(adapter) || (be_is_mc(adapter) &&
3268 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3153 res->max_tx_qs = 1; 3269 res->max_tx_qs = 1;
3154 else 3270 else
3155 res->max_tx_qs = BE3_MAX_TX_QS; 3271 res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3161,7 +3277,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
3161 res->max_rx_qs = res->max_rss_qs + 1; 3277 res->max_rx_qs = res->max_rss_qs + 1;
3162 3278
3163 if (be_physfn(adapter)) 3279 if (be_physfn(adapter))
3164 res->max_evt_qs = (max_vfs > 0) ? 3280 res->max_evt_qs = (res->max_vfs > 0) ?
3165 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; 3281 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3166 else 3282 else
3167 res->max_evt_qs = 1; 3283 res->max_evt_qs = 1;
@@ -3252,9 +3368,8 @@ static int be_get_config(struct be_adapter *adapter)
3252 if (status) 3368 if (status)
3253 return status; 3369 return status;
3254 3370
3255 /* primary mac needs 1 pmac entry */ 3371 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3256 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32), 3372 sizeof(*adapter->pmac_id), GFP_KERNEL);
3257 GFP_KERNEL);
3258 if (!adapter->pmac_id) 3373 if (!adapter->pmac_id)
3259 return -ENOMEM; 3374 return -ENOMEM;
3260 3375
@@ -3428,6 +3543,10 @@ static int be_setup(struct be_adapter *adapter)
3428 be_cmd_set_flow_control(adapter, adapter->tx_fc, 3543 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3429 adapter->rx_fc); 3544 adapter->rx_fc);
3430 3545
3546 if (be_physfn(adapter))
3547 be_cmd_set_logical_link_config(adapter,
3548 IFLA_VF_LINK_STATE_AUTO, 0);
3549
3431 if (sriov_want(adapter)) { 3550 if (sriov_want(adapter)) {
3432 if (be_max_vfs(adapter)) 3551 if (be_max_vfs(adapter))
3433 be_vf_setup(adapter); 3552 be_vf_setup(adapter);
@@ -4052,6 +4171,67 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4052 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB); 4171 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4053} 4172}
4054 4173
4174#ifdef CONFIG_BE2NET_VXLAN
4175static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4176 __be16 port)
4177{
4178 struct be_adapter *adapter = netdev_priv(netdev);
4179 struct device *dev = &adapter->pdev->dev;
4180 int status;
4181
4182 if (lancer_chip(adapter) || BEx_chip(adapter))
4183 return;
4184
4185 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4186 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4187 be16_to_cpu(port));
4188 dev_info(dev,
4189 "Only one UDP port supported for VxLAN offloads\n");
4190 return;
4191 }
4192
4193 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4194 OP_CONVERT_NORMAL_TO_TUNNEL);
4195 if (status) {
4196 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4197 goto err;
4198 }
4199
4200 status = be_cmd_set_vxlan_port(adapter, port);
4201 if (status) {
4202 dev_warn(dev, "Failed to add VxLAN port\n");
4203 goto err;
4204 }
4205 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4206 adapter->vxlan_port = port;
4207
4208 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4209 be16_to_cpu(port));
4210 return;
4211err:
4212 be_disable_vxlan_offloads(adapter);
4213 return;
4214}
4215
4216static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4217 __be16 port)
4218{
4219 struct be_adapter *adapter = netdev_priv(netdev);
4220
4221 if (lancer_chip(adapter) || BEx_chip(adapter))
4222 return;
4223
4224 if (adapter->vxlan_port != port)
4225 return;
4226
4227 be_disable_vxlan_offloads(adapter);
4228
4229 dev_info(&adapter->pdev->dev,
4230 "Disabled VxLAN offloads for UDP port %d\n",
4231 be16_to_cpu(port));
4232}
4233#endif
4234
4055static const struct net_device_ops be_netdev_ops = { 4235static const struct net_device_ops be_netdev_ops = {
4056 .ndo_open = be_open, 4236 .ndo_open = be_open,
4057 .ndo_stop = be_close, 4237 .ndo_stop = be_close,
@@ -4067,13 +4247,18 @@ static const struct net_device_ops be_netdev_ops = {
4067 .ndo_set_vf_vlan = be_set_vf_vlan, 4247 .ndo_set_vf_vlan = be_set_vf_vlan,
4068 .ndo_set_vf_tx_rate = be_set_vf_tx_rate, 4248 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4069 .ndo_get_vf_config = be_get_vf_config, 4249 .ndo_get_vf_config = be_get_vf_config,
4250 .ndo_set_vf_link_state = be_set_vf_link_state,
4070#ifdef CONFIG_NET_POLL_CONTROLLER 4251#ifdef CONFIG_NET_POLL_CONTROLLER
4071 .ndo_poll_controller = be_netpoll, 4252 .ndo_poll_controller = be_netpoll,
4072#endif 4253#endif
4073 .ndo_bridge_setlink = be_ndo_bridge_setlink, 4254 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4074 .ndo_bridge_getlink = be_ndo_bridge_getlink, 4255 .ndo_bridge_getlink = be_ndo_bridge_getlink,
4075#ifdef CONFIG_NET_RX_BUSY_POLL 4256#ifdef CONFIG_NET_RX_BUSY_POLL
4076 .ndo_busy_poll = be_busy_poll 4257 .ndo_busy_poll = be_busy_poll,
4258#endif
4259#ifdef CONFIG_BE2NET_VXLAN
4260 .ndo_add_vxlan_port = be_add_vxlan_port,
4261 .ndo_del_vxlan_port = be_del_vxlan_port,
4077#endif 4262#endif
4078}; 4263};
4079 4264
@@ -4081,6 +4266,12 @@ static void be_netdev_init(struct net_device *netdev)
4081{ 4266{
4082 struct be_adapter *adapter = netdev_priv(netdev); 4267 struct be_adapter *adapter = netdev_priv(netdev);
4083 4268
4269 if (skyhawk_chip(adapter)) {
4270 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4271 NETIF_F_TSO | NETIF_F_TSO6 |
4272 NETIF_F_GSO_UDP_TUNNEL;
4273 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4274 }
4084 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 4275 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4085 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 4276 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4086 NETIF_F_HW_VLAN_CTAG_TX; 4277 NETIF_F_HW_VLAN_CTAG_TX;
@@ -4427,14 +4618,32 @@ static bool be_reset_required(struct be_adapter *adapter)
4427 4618
4428static char *mc_name(struct be_adapter *adapter) 4619static char *mc_name(struct be_adapter *adapter)
4429{ 4620{
4430 if (adapter->function_mode & FLEX10_MODE) 4621 char *str = ""; /* default */
4431 return "FLEX10"; 4622
4432 else if (adapter->function_mode & VNIC_MODE) 4623 switch (adapter->mc_type) {
4433 return "vNIC"; 4624 case UMC:
4434 else if (adapter->function_mode & UMC_ENABLED) 4625 str = "UMC";
4435 return "UMC"; 4626 break;
4436 else 4627 case FLEX10:
4437 return ""; 4628 str = "FLEX10";
4629 break;
4630 case vNIC1:
4631 str = "vNIC-1";
4632 break;
4633 case nPAR:
4634 str = "nPAR";
4635 break;
4636 case UFP:
4637 str = "UFP";
4638 break;
4639 case vNIC2:
4640 str = "vNIC-2";
4641 break;
4642 default:
4643 str = "";
4644 }
4645
4646 return str;
4438} 4647}
4439 4648
4440static inline char *func_name(struct be_adapter *adapter) 4649static inline char *func_name(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 9cd5415fe017..a5dae4a62bb3 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 2cd1129e19af..a3ef8f804b9e 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 55e0fa03dc90..8b70ca7e342b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -660,11 +660,6 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
660 return -EBUSY; 660 return -EBUSY;
661} 661}
662 662
663static int ethoc_mdio_reset(struct mii_bus *bus)
664{
665 return 0;
666}
667
668static void ethoc_mdio_poll(struct net_device *dev) 663static void ethoc_mdio_poll(struct net_device *dev)
669{ 664{
670} 665}
@@ -1210,7 +1205,6 @@ static int ethoc_probe(struct platform_device *pdev)
1210 priv->mdio->name, pdev->id); 1205 priv->mdio->name, pdev->id);
1211 priv->mdio->read = ethoc_mdio_read; 1206 priv->mdio->read = ethoc_mdio_read;
1212 priv->mdio->write = ethoc_mdio_write; 1207 priv->mdio->write = ethoc_mdio_write;
1213 priv->mdio->reset = ethoc_mdio_reset;
1214 priv->mdio->priv = priv; 1208 priv->mdio->priv = priv;
1215 1209
1216 priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 1210 priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c11ecbc98149..68069eabc4f8 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -940,11 +940,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
940 return -EIO; 940 return -EIO;
941} 941}
942 942
943static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
944{
945 return 0;
946}
947
948/****************************************************************************** 943/******************************************************************************
949 * struct ethtool_ops functions 944 * struct ethtool_ops functions
950 *****************************************************************************/ 945 *****************************************************************************/
@@ -1262,7 +1257,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
1262 priv->mii_bus->priv = netdev; 1257 priv->mii_bus->priv = netdev;
1263 priv->mii_bus->read = ftgmac100_mdiobus_read; 1258 priv->mii_bus->read = ftgmac100_mdiobus_read;
1264 priv->mii_bus->write = ftgmac100_mdiobus_write; 1259 priv->mii_bus->write = ftgmac100_mdiobus_write;
1265 priv->mii_bus->reset = ftgmac100_mdiobus_reset;
1266 priv->mii_bus->irq = priv->phy_irq; 1260 priv->mii_bus->irq = priv->phy_irq;
1267 1261
1268 for (i = 0; i < PHY_MAX_ADDR; i++) 1262 for (i = 0; i < PHY_MAX_ADDR; i++)
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 549ce13b92ac..71debd1c18c9 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
14obj-$(CONFIG_GIANFAR) += gianfar_driver.o 14obj-$(CONFIG_GIANFAR) += gianfar_driver.o
15obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o 15obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
16gianfar_driver-objs := gianfar.o \ 16gianfar_driver-objs := gianfar.o \
17 gianfar_ethtool.o \ 17 gianfar_ethtool.o
18 gianfar_sysfs.o
19obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o 18obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
20ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o 19ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 03a351300013..8d69e439f0c5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
338 338
339 /* Protocol checksum off-load for TCP and UDP. */ 339 /* Protocol checksum off-load for TCP and UDP. */
340 if (fec_enet_clear_csum(skb, ndev)) { 340 if (fec_enet_clear_csum(skb, ndev)) {
341 kfree_skb(skb); 341 dev_kfree_skb_any(skb);
342 return NETDEV_TX_OK; 342 return NETDEV_TX_OK;
343 } 343 }
344 344
@@ -1255,11 +1255,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1255 return 0; 1255 return 0;
1256} 1256}
1257 1257
1258static int fec_enet_mdio_reset(struct mii_bus *bus)
1259{
1260 return 0;
1261}
1262
1263static int fec_enet_mii_probe(struct net_device *ndev) 1258static int fec_enet_mii_probe(struct net_device *ndev)
1264{ 1259{
1265 struct fec_enet_private *fep = netdev_priv(ndev); 1260 struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1384,7 +1379,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1384 fep->mii_bus->name = "fec_enet_mii_bus"; 1379 fep->mii_bus->name = "fec_enet_mii_bus";
1385 fep->mii_bus->read = fec_enet_mdio_read; 1380 fep->mii_bus->read = fec_enet_mdio_read;
1386 fep->mii_bus->write = fec_enet_mdio_write; 1381 fep->mii_bus->write = fec_enet_mdio_write;
1387 fep->mii_bus->reset = fec_enet_mdio_reset;
1388 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 1382 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1389 pdev->name, fep->dev_id + 1); 1383 pdev->name, fep->dev_id + 1);
1390 fep->mii_bus->priv = fep; 1384 fep->mii_bus->priv = fep;
@@ -1904,10 +1898,11 @@ fec_set_mac_address(struct net_device *ndev, void *p)
1904 struct fec_enet_private *fep = netdev_priv(ndev); 1898 struct fec_enet_private *fep = netdev_priv(ndev);
1905 struct sockaddr *addr = p; 1899 struct sockaddr *addr = p;
1906 1900
1907 if (!is_valid_ether_addr(addr->sa_data)) 1901 if (addr) {
1908 return -EADDRNOTAVAIL; 1902 if (!is_valid_ether_addr(addr->sa_data))
1909 1903 return -EADDRNOTAVAIL;
1910 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 1904 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1905 }
1911 1906
1912 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 1907 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1913 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 1908 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
@@ -2006,6 +2001,8 @@ static int fec_enet_init(struct net_device *ndev)
2006 2001
2007 /* Get the Ethernet address */ 2002 /* Get the Ethernet address */
2008 fec_get_mac(ndev); 2003 fec_get_mac(ndev);
2004 /* make sure MAC we just acquired is programmed into the hw */
2005 fec_set_mac_address(ndev, NULL);
2009 2006
2010 /* init the tx & rx ring size */ 2007 /* init the tx & rx ring size */
2011 fep->tx_ring_size = TX_RING_SIZE; 2008 fep->tx_ring_size = TX_RING_SIZE;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 89ccb5b08708..82386b29914a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -372,6 +372,7 @@ void fec_ptp_init(struct platform_device *pdev)
372 fep->ptp_caps.n_alarm = 0; 372 fep->ptp_caps.n_alarm = 0;
373 fep->ptp_caps.n_ext_ts = 0; 373 fep->ptp_caps.n_ext_ts = 0;
374 fep->ptp_caps.n_per_out = 0; 374 fep->ptp_caps.n_per_out = 0;
375 fep->ptp_caps.n_pins = 0;
375 fep->ptp_caps.pps = 0; 376 fep->ptp_caps.pps = 0;
376 fep->ptp_caps.adjfreq = fec_ptp_adjfreq; 377 fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
377 fep->ptp_caps.adjtime = fec_ptp_adjtime; 378 fep->ptp_caps.adjtime = fec_ptp_adjtime;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 62f042d4aaa9..dc80db41d6b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
91 u16 pkt_len, sc; 91 u16 pkt_len, sc;
92 int curidx; 92 int curidx;
93 93
94 if (budget <= 0)
95 return received;
96
94 /* 97 /*
95 * First, grab all of the stats for the incoming packet. 98 * First, grab all of the stats for the incoming packet.
96 * These get messed up if we get called due to a busy condition. 99 * These get messed up if we get called due to a busy condition.
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 7e69c983d12a..ebf5d6429a8d 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -95,12 +95,6 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
95 95
96} 96}
97 97
98static int fs_enet_fec_mii_reset(struct mii_bus *bus)
99{
100 /* nothing here - for now */
101 return 0;
102}
103
104static struct of_device_id fs_enet_mdio_fec_match[]; 98static struct of_device_id fs_enet_mdio_fec_match[];
105static int fs_enet_mdio_probe(struct platform_device *ofdev) 99static int fs_enet_mdio_probe(struct platform_device *ofdev)
106{ 100{
@@ -128,7 +122,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
128 new_bus->name = "FEC MII Bus"; 122 new_bus->name = "FEC MII Bus";
129 new_bus->read = &fs_enet_fec_mii_read; 123 new_bus->read = &fs_enet_fec_mii_read;
130 new_bus->write = &fs_enet_fec_mii_write; 124 new_bus->write = &fs_enet_fec_mii_write;
131 new_bus->reset = &fs_enet_fec_mii_reset;
132 125
133 ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); 126 ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
134 if (ret) 127 if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ad5a5aadc7e1..9125d9abf099 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13 * Copyright 2007 MontaVista Software, Inc. 13 * Copyright 2007 MontaVista Software, Inc.
14 * 14 *
15 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
@@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123static void adjust_link(struct net_device *dev); 123static void adjust_link(struct net_device *dev);
124static void init_registers(struct net_device *dev);
125static int init_phy(struct net_device *dev); 124static int init_phy(struct net_device *dev);
126static int gfar_probe(struct platform_device *ofdev); 125static int gfar_probe(struct platform_device *ofdev);
127static int gfar_remove(struct platform_device *ofdev); 126static int gfar_remove(struct platform_device *ofdev);
@@ -129,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv);
129static void gfar_set_multi(struct net_device *dev); 128static void gfar_set_multi(struct net_device *dev);
130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 129static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
131static void gfar_configure_serdes(struct net_device *dev); 130static void gfar_configure_serdes(struct net_device *dev);
132static int gfar_poll(struct napi_struct *napi, int budget); 131static int gfar_poll_rx(struct napi_struct *napi, int budget);
133static int gfar_poll_sq(struct napi_struct *napi, int budget); 132static int gfar_poll_tx(struct napi_struct *napi, int budget);
133static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
134static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
134#ifdef CONFIG_NET_POLL_CONTROLLER 135#ifdef CONFIG_NET_POLL_CONTROLLER
135static void gfar_netpoll(struct net_device *dev); 136static void gfar_netpoll(struct net_device *dev);
136#endif 137#endif
@@ -138,9 +139,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
138static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 139static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
139static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 140static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
140 int amount_pull, struct napi_struct *napi); 141 int amount_pull, struct napi_struct *napi);
141void gfar_halt(struct net_device *dev); 142static void gfar_halt_nodisable(struct gfar_private *priv);
142static void gfar_halt_nodisable(struct net_device *dev);
143void gfar_start(struct net_device *dev);
144static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
145static void gfar_set_mac_for_addr(struct net_device *dev, int num, 144static void gfar_set_mac_for_addr(struct net_device *dev, int num,
146 const u8 *addr); 145 const u8 *addr);
@@ -332,72 +331,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
332 } 331 }
333} 332}
334 333
335static void gfar_init_mac(struct net_device *ndev) 334static void gfar_rx_buff_size_config(struct gfar_private *priv)
336{ 335{
337 struct gfar_private *priv = netdev_priv(ndev); 336 int frame_size = priv->ndev->mtu + ETH_HLEN;
338 struct gfar __iomem *regs = priv->gfargrp[0].regs;
339 u32 rctrl = 0;
340 u32 tctrl = 0;
341 u32 attrs = 0;
342
343 /* write the tx/rx base registers */
344 gfar_init_tx_rx_base(priv);
345
346 /* Configure the coalescing support */
347 gfar_configure_coalescing_all(priv);
348 337
349 /* set this when rx hw offload (TOE) functions are being used */ 338 /* set this when rx hw offload (TOE) functions are being used */
350 priv->uses_rxfcb = 0; 339 priv->uses_rxfcb = 0;
351 340
341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
342 priv->uses_rxfcb = 1;
343
344 if (priv->hwts_rx_en)
345 priv->uses_rxfcb = 1;
346
347 if (priv->uses_rxfcb)
348 frame_size += GMAC_FCB_LEN;
349
350 frame_size += priv->padding;
351
352 frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
353 INCREMENTAL_BUFFER_SIZE;
354
355 priv->rx_buffer_size = frame_size;
356}
357
358static void gfar_mac_rx_config(struct gfar_private *priv)
359{
360 struct gfar __iomem *regs = priv->gfargrp[0].regs;
361 u32 rctrl = 0;
362
352 if (priv->rx_filer_enable) { 363 if (priv->rx_filer_enable) {
353 rctrl |= RCTRL_FILREN; 364 rctrl |= RCTRL_FILREN;
354 /* Program the RIR0 reg with the required distribution */ 365 /* Program the RIR0 reg with the required distribution */
355 gfar_write(&regs->rir0, DEFAULT_RIR0); 366 if (priv->poll_mode == GFAR_SQ_POLLING)
367 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
368 else /* GFAR_MQ_POLLING */
369 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
356 } 370 }
357 371
358 /* Restore PROMISC mode */ 372 /* Restore PROMISC mode */
359 if (ndev->flags & IFF_PROMISC) 373 if (priv->ndev->flags & IFF_PROMISC)
360 rctrl |= RCTRL_PROM; 374 rctrl |= RCTRL_PROM;
361 375
362 if (ndev->features & NETIF_F_RXCSUM) { 376 if (priv->ndev->features & NETIF_F_RXCSUM)
363 rctrl |= RCTRL_CHECKSUMMING; 377 rctrl |= RCTRL_CHECKSUMMING;
364 priv->uses_rxfcb = 1;
365 }
366 378
367 if (priv->extended_hash) { 379 if (priv->extended_hash)
368 rctrl |= RCTRL_EXTHASH; 380 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
369
370 gfar_clear_exact_match(ndev);
371 rctrl |= RCTRL_EMEN;
372 }
373 381
374 if (priv->padding) { 382 if (priv->padding) {
375 rctrl &= ~RCTRL_PAL_MASK; 383 rctrl &= ~RCTRL_PAL_MASK;
376 rctrl |= RCTRL_PADDING(priv->padding); 384 rctrl |= RCTRL_PADDING(priv->padding);
377 } 385 }
378 386
379 /* Insert receive time stamps into padding alignment bytes */
380 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
381 rctrl &= ~RCTRL_PAL_MASK;
382 rctrl |= RCTRL_PADDING(8);
383 priv->padding = 8;
384 }
385
386 /* Enable HW time stamping if requested from user space */ 387 /* Enable HW time stamping if requested from user space */
387 if (priv->hwts_rx_en) { 388 if (priv->hwts_rx_en)
388 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; 389 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
389 priv->uses_rxfcb = 1;
390 }
391 390
392 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { 391 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
393 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 392 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
394 priv->uses_rxfcb = 1;
395 }
396 393
397 /* Init rctrl based on our settings */ 394 /* Init rctrl based on our settings */
398 gfar_write(&regs->rctrl, rctrl); 395 gfar_write(&regs->rctrl, rctrl);
396}
397
398static void gfar_mac_tx_config(struct gfar_private *priv)
399{
400 struct gfar __iomem *regs = priv->gfargrp[0].regs;
401 u32 tctrl = 0;
399 402
400 if (ndev->features & NETIF_F_IP_CSUM) 403 if (priv->ndev->features & NETIF_F_IP_CSUM)
401 tctrl |= TCTRL_INIT_CSUM; 404 tctrl |= TCTRL_INIT_CSUM;
402 405
403 if (priv->prio_sched_en) 406 if (priv->prio_sched_en)
@@ -408,30 +411,51 @@ static void gfar_init_mac(struct net_device *ndev)
408 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); 411 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
409 } 412 }
410 413
411 gfar_write(&regs->tctrl, tctrl); 414 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
415 tctrl |= TCTRL_VLINS;
412 416
413 /* Set the extraction length and index */ 417 gfar_write(&regs->tctrl, tctrl);
414 attrs = ATTRELI_EL(priv->rx_stash_size) | 418}
415 ATTRELI_EI(priv->rx_stash_index);
416 419
417 gfar_write(&regs->attreli, attrs); 420static void gfar_configure_coalescing(struct gfar_private *priv,
421 unsigned long tx_mask, unsigned long rx_mask)
422{
423 struct gfar __iomem *regs = priv->gfargrp[0].regs;
424 u32 __iomem *baddr;
418 425
419 /* Start with defaults, and add stashing or locking 426 if (priv->mode == MQ_MG_MODE) {
420 * depending on the approprate variables 427 int i = 0;
421 */
422 attrs = ATTR_INIT_SETTINGS;
423 428
424 if (priv->bd_stash_en) 429 baddr = &regs->txic0;
425 attrs |= ATTR_BDSTASH; 430 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
431 gfar_write(baddr + i, 0);
432 if (likely(priv->tx_queue[i]->txcoalescing))
433 gfar_write(baddr + i, priv->tx_queue[i]->txic);
434 }
426 435
427 if (priv->rx_stash_size != 0) 436 baddr = &regs->rxic0;
428 attrs |= ATTR_BUFSTASH; 437 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
438 gfar_write(baddr + i, 0);
439 if (likely(priv->rx_queue[i]->rxcoalescing))
440 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
441 }
442 } else {
443 /* Backward compatible case -- even if we enable
444 * multiple queues, there's only single reg to program
445 */
446 gfar_write(&regs->txic, 0);
447 if (likely(priv->tx_queue[0]->txcoalescing))
448 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
429 449
430 gfar_write(&regs->attr, attrs); 450 gfar_write(&regs->rxic, 0);
451 if (unlikely(priv->rx_queue[0]->rxcoalescing))
452 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
453 }
454}
431 455
432 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); 456void gfar_configure_coalescing_all(struct gfar_private *priv)
433 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); 457{
434 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); 458 gfar_configure_coalescing(priv, 0xFF, 0xFF);
435} 459}
436 460
437static struct net_device_stats *gfar_get_stats(struct net_device *dev) 461static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,12 +503,27 @@ static const struct net_device_ops gfar_netdev_ops = {
479#endif 503#endif
480}; 504};
481 505
482void lock_rx_qs(struct gfar_private *priv) 506static void gfar_ints_disable(struct gfar_private *priv)
483{ 507{
484 int i; 508 int i;
509 for (i = 0; i < priv->num_grps; i++) {
510 struct gfar __iomem *regs = priv->gfargrp[i].regs;
511 /* Clear IEVENT */
512 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
485 513
486 for (i = 0; i < priv->num_rx_queues; i++) 514 /* Initialize IMASK */
487 spin_lock(&priv->rx_queue[i]->rxlock); 515 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
516 }
517}
518
519static void gfar_ints_enable(struct gfar_private *priv)
520{
521 int i;
522 for (i = 0; i < priv->num_grps; i++) {
523 struct gfar __iomem *regs = priv->gfargrp[i].regs;
524 /* Unmask the interrupts we look for */
525 gfar_write(&regs->imask, IMASK_DEFAULT);
526 }
488} 527}
489 528
490void lock_tx_qs(struct gfar_private *priv) 529void lock_tx_qs(struct gfar_private *priv)
@@ -495,23 +534,50 @@ void lock_tx_qs(struct gfar_private *priv)
495 spin_lock(&priv->tx_queue[i]->txlock); 534 spin_lock(&priv->tx_queue[i]->txlock);
496} 535}
497 536
498void unlock_rx_qs(struct gfar_private *priv) 537void unlock_tx_qs(struct gfar_private *priv)
499{ 538{
500 int i; 539 int i;
501 540
502 for (i = 0; i < priv->num_rx_queues; i++) 541 for (i = 0; i < priv->num_tx_queues; i++)
503 spin_unlock(&priv->rx_queue[i]->rxlock); 542 spin_unlock(&priv->tx_queue[i]->txlock);
504} 543}
505 544
506void unlock_tx_qs(struct gfar_private *priv) 545static int gfar_alloc_tx_queues(struct gfar_private *priv)
507{ 546{
508 int i; 547 int i;
509 548
510 for (i = 0; i < priv->num_tx_queues; i++) 549 for (i = 0; i < priv->num_tx_queues; i++) {
511 spin_unlock(&priv->tx_queue[i]->txlock); 550 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
551 GFP_KERNEL);
552 if (!priv->tx_queue[i])
553 return -ENOMEM;
554
555 priv->tx_queue[i]->tx_skbuff = NULL;
556 priv->tx_queue[i]->qindex = i;
557 priv->tx_queue[i]->dev = priv->ndev;
558 spin_lock_init(&(priv->tx_queue[i]->txlock));
559 }
560 return 0;
512} 561}
513 562
514static void free_tx_pointers(struct gfar_private *priv) 563static int gfar_alloc_rx_queues(struct gfar_private *priv)
564{
565 int i;
566
567 for (i = 0; i < priv->num_rx_queues; i++) {
568 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
569 GFP_KERNEL);
570 if (!priv->rx_queue[i])
571 return -ENOMEM;
572
573 priv->rx_queue[i]->rx_skbuff = NULL;
574 priv->rx_queue[i]->qindex = i;
575 priv->rx_queue[i]->dev = priv->ndev;
576 }
577 return 0;
578}
579
580static void gfar_free_tx_queues(struct gfar_private *priv)
515{ 581{
516 int i; 582 int i;
517 583
@@ -519,7 +585,7 @@ static void free_tx_pointers(struct gfar_private *priv)
519 kfree(priv->tx_queue[i]); 585 kfree(priv->tx_queue[i]);
520} 586}
521 587
522static void free_rx_pointers(struct gfar_private *priv) 588static void gfar_free_rx_queues(struct gfar_private *priv)
523{ 589{
524 int i; 590 int i;
525 591
@@ -553,23 +619,26 @@ static void disable_napi(struct gfar_private *priv)
553{ 619{
554 int i; 620 int i;
555 621
556 for (i = 0; i < priv->num_grps; i++) 622 for (i = 0; i < priv->num_grps; i++) {
557 napi_disable(&priv->gfargrp[i].napi); 623 napi_disable(&priv->gfargrp[i].napi_rx);
624 napi_disable(&priv->gfargrp[i].napi_tx);
625 }
558} 626}
559 627
560static void enable_napi(struct gfar_private *priv) 628static void enable_napi(struct gfar_private *priv)
561{ 629{
562 int i; 630 int i;
563 631
564 for (i = 0; i < priv->num_grps; i++) 632 for (i = 0; i < priv->num_grps; i++) {
565 napi_enable(&priv->gfargrp[i].napi); 633 napi_enable(&priv->gfargrp[i].napi_rx);
634 napi_enable(&priv->gfargrp[i].napi_tx);
635 }
566} 636}
567 637
568static int gfar_parse_group(struct device_node *np, 638static int gfar_parse_group(struct device_node *np,
569 struct gfar_private *priv, const char *model) 639 struct gfar_private *priv, const char *model)
570{ 640{
571 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; 641 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
572 u32 *queue_mask;
573 int i; 642 int i;
574 643
575 for (i = 0; i < GFAR_NUM_IRQS; i++) { 644 for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -598,16 +667,52 @@ static int gfar_parse_group(struct device_node *np,
598 grp->priv = priv; 667 grp->priv = priv;
599 spin_lock_init(&grp->grplock); 668 spin_lock_init(&grp->grplock);
600 if (priv->mode == MQ_MG_MODE) { 669 if (priv->mode == MQ_MG_MODE) {
601 queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); 670 u32 *rxq_mask, *txq_mask;
602 grp->rx_bit_map = queue_mask ? 671 rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
603 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 672 txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
604 queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); 673
605 grp->tx_bit_map = queue_mask ? 674 if (priv->poll_mode == GFAR_SQ_POLLING) {
606 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 675 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
676 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
677 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
678 } else { /* GFAR_MQ_POLLING */
679 grp->rx_bit_map = rxq_mask ?
680 *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
681 grp->tx_bit_map = txq_mask ?
682 *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
683 }
607 } else { 684 } else {
608 grp->rx_bit_map = 0xFF; 685 grp->rx_bit_map = 0xFF;
609 grp->tx_bit_map = 0xFF; 686 grp->tx_bit_map = 0xFF;
610 } 687 }
688
689 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
690 * right to left, so we need to revert the 8 bits to get the q index
691 */
692 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
693 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
694
695 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
696 * also assign queues to groups
697 */
698 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
699 if (!grp->rx_queue)
700 grp->rx_queue = priv->rx_queue[i];
701 grp->num_rx_queues++;
702 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
703 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
704 priv->rx_queue[i]->grp = grp;
705 }
706
707 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
708 if (!grp->tx_queue)
709 grp->tx_queue = priv->tx_queue[i];
710 grp->num_tx_queues++;
711 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
712 priv->tqueue |= (TQUEUE_EN0 >> i);
713 priv->tx_queue[i]->grp = grp;
714 }
715
611 priv->num_grps++; 716 priv->num_grps++;
612 717
613 return 0; 718 return 0;
@@ -628,13 +733,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
628 const u32 *stash_idx; 733 const u32 *stash_idx;
629 unsigned int num_tx_qs, num_rx_qs; 734 unsigned int num_tx_qs, num_rx_qs;
630 u32 *tx_queues, *rx_queues; 735 u32 *tx_queues, *rx_queues;
736 unsigned short mode, poll_mode;
631 737
632 if (!np || !of_device_is_available(np)) 738 if (!np || !of_device_is_available(np))
633 return -ENODEV; 739 return -ENODEV;
634 740
635 /* parse the num of tx and rx queues */ 741 if (of_device_is_compatible(np, "fsl,etsec2")) {
742 mode = MQ_MG_MODE;
743 poll_mode = GFAR_SQ_POLLING;
744 } else {
745 mode = SQ_SG_MODE;
746 poll_mode = GFAR_SQ_POLLING;
747 }
748
749 /* parse the num of HW tx and rx queues */
636 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); 750 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
637 num_tx_qs = tx_queues ? *tx_queues : 1; 751 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
752
753 if (mode == SQ_SG_MODE) {
754 num_tx_qs = 1;
755 num_rx_qs = 1;
756 } else { /* MQ_MG_MODE */
757 /* get the actual number of supported groups */
758 unsigned int num_grps = of_get_available_child_count(np);
759
760 if (num_grps == 0 || num_grps > MAXGROUPS) {
761 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
762 num_grps);
763 pr_err("Cannot do alloc_etherdev, aborting\n");
764 return -EINVAL;
765 }
766
767 if (poll_mode == GFAR_SQ_POLLING) {
768 num_tx_qs = num_grps; /* one txq per int group */
769 num_rx_qs = num_grps; /* one rxq per int group */
770 } else { /* GFAR_MQ_POLLING */
771 num_tx_qs = tx_queues ? *tx_queues : 1;
772 num_rx_qs = rx_queues ? *rx_queues : 1;
773 }
774 }
638 775
639 if (num_tx_qs > MAX_TX_QS) { 776 if (num_tx_qs > MAX_TX_QS) {
640 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 777 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -643,9 +780,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
643 return -EINVAL; 780 return -EINVAL;
644 } 781 }
645 782
646 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
647 num_rx_qs = rx_queues ? *rx_queues : 1;
648
649 if (num_rx_qs > MAX_RX_QS) { 783 if (num_rx_qs > MAX_RX_QS) {
650 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 784 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
651 num_rx_qs, MAX_RX_QS); 785 num_rx_qs, MAX_RX_QS);
@@ -661,10 +795,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
661 priv = netdev_priv(dev); 795 priv = netdev_priv(dev);
662 priv->ndev = dev; 796 priv->ndev = dev;
663 797
798 priv->mode = mode;
799 priv->poll_mode = poll_mode;
800
664 priv->num_tx_queues = num_tx_qs; 801 priv->num_tx_queues = num_tx_qs;
665 netif_set_real_num_rx_queues(dev, num_rx_qs); 802 netif_set_real_num_rx_queues(dev, num_rx_qs);
666 priv->num_rx_queues = num_rx_qs; 803 priv->num_rx_queues = num_rx_qs;
667 priv->num_grps = 0x0; 804
805 err = gfar_alloc_tx_queues(priv);
806 if (err)
807 goto tx_alloc_failed;
808
809 err = gfar_alloc_rx_queues(priv);
810 if (err)
811 goto rx_alloc_failed;
668 812
669 /* Init Rx queue filer rule set linked list */ 813 /* Init Rx queue filer rule set linked list */
670 INIT_LIST_HEAD(&priv->rx_list.list); 814 INIT_LIST_HEAD(&priv->rx_list.list);
@@ -677,52 +821,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
677 priv->gfargrp[i].regs = NULL; 821 priv->gfargrp[i].regs = NULL;
678 822
679 /* Parse and initialize group specific information */ 823 /* Parse and initialize group specific information */
680 if (of_device_is_compatible(np, "fsl,etsec2")) { 824 if (priv->mode == MQ_MG_MODE) {
681 priv->mode = MQ_MG_MODE;
682 for_each_child_of_node(np, child) { 825 for_each_child_of_node(np, child) {
683 err = gfar_parse_group(child, priv, model); 826 err = gfar_parse_group(child, priv, model);
684 if (err) 827 if (err)
685 goto err_grp_init; 828 goto err_grp_init;
686 } 829 }
687 } else { 830 } else { /* SQ_SG_MODE */
688 priv->mode = SQ_SG_MODE;
689 err = gfar_parse_group(np, priv, model); 831 err = gfar_parse_group(np, priv, model);
690 if (err) 832 if (err)
691 goto err_grp_init; 833 goto err_grp_init;
692 } 834 }
693 835
694 for (i = 0; i < priv->num_tx_queues; i++)
695 priv->tx_queue[i] = NULL;
696 for (i = 0; i < priv->num_rx_queues; i++)
697 priv->rx_queue[i] = NULL;
698
699 for (i = 0; i < priv->num_tx_queues; i++) {
700 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
701 GFP_KERNEL);
702 if (!priv->tx_queue[i]) {
703 err = -ENOMEM;
704 goto tx_alloc_failed;
705 }
706 priv->tx_queue[i]->tx_skbuff = NULL;
707 priv->tx_queue[i]->qindex = i;
708 priv->tx_queue[i]->dev = dev;
709 spin_lock_init(&(priv->tx_queue[i]->txlock));
710 }
711
712 for (i = 0; i < priv->num_rx_queues; i++) {
713 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
714 GFP_KERNEL);
715 if (!priv->rx_queue[i]) {
716 err = -ENOMEM;
717 goto rx_alloc_failed;
718 }
719 priv->rx_queue[i]->rx_skbuff = NULL;
720 priv->rx_queue[i]->qindex = i;
721 priv->rx_queue[i]->dev = dev;
722 spin_lock_init(&(priv->rx_queue[i]->rxlock));
723 }
724
725
726 stash = of_get_property(np, "bd-stash", NULL); 836 stash = of_get_property(np, "bd-stash", NULL);
727 837
728 if (stash) { 838 if (stash) {
@@ -749,17 +859,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
749 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 859 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
750 860
751 if (model && !strcasecmp(model, "TSEC")) 861 if (model && !strcasecmp(model, "TSEC"))
752 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 862 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
753 FSL_GIANFAR_DEV_HAS_COALESCE | 863 FSL_GIANFAR_DEV_HAS_COALESCE |
754 FSL_GIANFAR_DEV_HAS_RMON | 864 FSL_GIANFAR_DEV_HAS_RMON |
755 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 865 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
756 866
757 if (model && !strcasecmp(model, "eTSEC")) 867 if (model && !strcasecmp(model, "eTSEC"))
758 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 868 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
759 FSL_GIANFAR_DEV_HAS_COALESCE | 869 FSL_GIANFAR_DEV_HAS_COALESCE |
760 FSL_GIANFAR_DEV_HAS_RMON | 870 FSL_GIANFAR_DEV_HAS_RMON |
761 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 871 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
762 FSL_GIANFAR_DEV_HAS_PADDING |
763 FSL_GIANFAR_DEV_HAS_CSUM | 872 FSL_GIANFAR_DEV_HAS_CSUM |
764 FSL_GIANFAR_DEV_HAS_VLAN | 873 FSL_GIANFAR_DEV_HAS_VLAN |
765 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 874 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +893,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
784 893
785 return 0; 894 return 0;
786 895
787rx_alloc_failed:
788 free_rx_pointers(priv);
789tx_alloc_failed:
790 free_tx_pointers(priv);
791err_grp_init: 896err_grp_init:
792 unmap_group_regs(priv); 897 unmap_group_regs(priv);
898rx_alloc_failed:
899 gfar_free_rx_queues(priv);
900tx_alloc_failed:
901 gfar_free_tx_queues(priv);
793 free_gfar_dev(priv); 902 free_gfar_dev(priv);
794 return err; 903 return err;
795} 904}
@@ -822,18 +931,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
822 switch (config.rx_filter) { 931 switch (config.rx_filter) {
823 case HWTSTAMP_FILTER_NONE: 932 case HWTSTAMP_FILTER_NONE:
824 if (priv->hwts_rx_en) { 933 if (priv->hwts_rx_en) {
825 stop_gfar(netdev);
826 priv->hwts_rx_en = 0; 934 priv->hwts_rx_en = 0;
827 startup_gfar(netdev); 935 reset_gfar(netdev);
828 } 936 }
829 break; 937 break;
830 default: 938 default:
831 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 939 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
832 return -ERANGE; 940 return -ERANGE;
833 if (!priv->hwts_rx_en) { 941 if (!priv->hwts_rx_en) {
834 stop_gfar(netdev);
835 priv->hwts_rx_en = 1; 942 priv->hwts_rx_en = 1;
836 startup_gfar(netdev); 943 reset_gfar(netdev);
837 } 944 }
838 config.rx_filter = HWTSTAMP_FILTER_ALL; 945 config.rx_filter = HWTSTAMP_FILTER_ALL;
839 break; 946 break;
@@ -875,19 +982,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
875 return phy_mii_ioctl(priv->phydev, rq, cmd); 982 return phy_mii_ioctl(priv->phydev, rq, cmd);
876} 983}
877 984
878static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
879{
880 unsigned int new_bit_map = 0x0;
881 int mask = 0x1 << (max_qs - 1), i;
882
883 for (i = 0; i < max_qs; i++) {
884 if (bit_map & mask)
885 new_bit_map = new_bit_map + (1 << i);
886 mask = mask >> 0x1;
887 }
888 return new_bit_map;
889}
890
891static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 985static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
892 u32 class) 986 u32 class)
893{ 987{
@@ -1005,99 +1099,140 @@ static void gfar_detect_errata(struct gfar_private *priv)
1005 priv->errata); 1099 priv->errata);
1006} 1100}
1007 1101
1008/* Set up the ethernet device structure, private data, 1102void gfar_mac_reset(struct gfar_private *priv)
1009 * and anything else we need before we start
1010 */
1011static int gfar_probe(struct platform_device *ofdev)
1012{ 1103{
1104 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1013 u32 tempval; 1105 u32 tempval;
1014 struct net_device *dev = NULL;
1015 struct gfar_private *priv = NULL;
1016 struct gfar __iomem *regs = NULL;
1017 int err = 0, i, grp_idx = 0;
1018 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
1019 u32 isrg = 0;
1020 u32 __iomem *baddr;
1021
1022 err = gfar_of_init(ofdev, &dev);
1023
1024 if (err)
1025 return err;
1026
1027 priv = netdev_priv(dev);
1028 priv->ndev = dev;
1029 priv->ofdev = ofdev;
1030 priv->dev = &ofdev->dev;
1031 SET_NETDEV_DEV(dev, &ofdev->dev);
1032
1033 spin_lock_init(&priv->bflock);
1034 INIT_WORK(&priv->reset_task, gfar_reset_task);
1035
1036 platform_set_drvdata(ofdev, priv);
1037 regs = priv->gfargrp[0].regs;
1038
1039 gfar_detect_errata(priv);
1040
1041 /* Stop the DMA engine now, in case it was running before
1042 * (The firmware could have used it, and left it running).
1043 */
1044 gfar_halt(dev);
1045 1106
1046 /* Reset MAC layer */ 1107 /* Reset MAC layer */
1047 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); 1108 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1048 1109
1049 /* We need to delay at least 3 TX clocks */ 1110 /* We need to delay at least 3 TX clocks */
1050 udelay(2); 1111 udelay(3);
1051 1112
1052 tempval = 0;
1053 if (!priv->pause_aneg_en && priv->tx_pause_en)
1054 tempval |= MACCFG1_TX_FLOW;
1055 if (!priv->pause_aneg_en && priv->rx_pause_en)
1056 tempval |= MACCFG1_RX_FLOW;
1057 /* the soft reset bit is not self-resetting, so we need to 1113 /* the soft reset bit is not self-resetting, so we need to
1058 * clear it before resuming normal operation 1114 * clear it before resuming normal operation
1059 */ 1115 */
1060 gfar_write(&regs->maccfg1, tempval); 1116 gfar_write(&regs->maccfg1, 0);
1117
1118 udelay(3);
1119
1120 /* Compute rx_buff_size based on config flags */
1121 gfar_rx_buff_size_config(priv);
1122
1123 /* Initialize the max receive frame/buffer lengths */
1124 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1125 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1126
1127 /* Initialize the Minimum Frame Length Register */
1128 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1061 1129
1062 /* Initialize MACCFG2. */ 1130 /* Initialize MACCFG2. */
1063 tempval = MACCFG2_INIT_SETTINGS; 1131 tempval = MACCFG2_INIT_SETTINGS;
1064 if (gfar_has_errata(priv, GFAR_ERRATA_74)) 1132
1133 /* If the mtu is larger than the max size for standard
1134 * ethernet frames (ie, a jumbo frame), then set maccfg2
1135 * to allow huge frames, and to check the length
1136 */
1137 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1138 gfar_has_errata(priv, GFAR_ERRATA_74))
1065 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; 1139 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1140
1066 gfar_write(&regs->maccfg2, tempval); 1141 gfar_write(&regs->maccfg2, tempval);
1067 1142
1143 /* Clear mac addr hash registers */
1144 gfar_write(&regs->igaddr0, 0);
1145 gfar_write(&regs->igaddr1, 0);
1146 gfar_write(&regs->igaddr2, 0);
1147 gfar_write(&regs->igaddr3, 0);
1148 gfar_write(&regs->igaddr4, 0);
1149 gfar_write(&regs->igaddr5, 0);
1150 gfar_write(&regs->igaddr6, 0);
1151 gfar_write(&regs->igaddr7, 0);
1152
1153 gfar_write(&regs->gaddr0, 0);
1154 gfar_write(&regs->gaddr1, 0);
1155 gfar_write(&regs->gaddr2, 0);
1156 gfar_write(&regs->gaddr3, 0);
1157 gfar_write(&regs->gaddr4, 0);
1158 gfar_write(&regs->gaddr5, 0);
1159 gfar_write(&regs->gaddr6, 0);
1160 gfar_write(&regs->gaddr7, 0);
1161
1162 if (priv->extended_hash)
1163 gfar_clear_exact_match(priv->ndev);
1164
1165 gfar_mac_rx_config(priv);
1166
1167 gfar_mac_tx_config(priv);
1168
1169 gfar_set_mac_address(priv->ndev);
1170
1171 gfar_set_multi(priv->ndev);
1172
1173 /* clear ievent and imask before configuring coalescing */
1174 gfar_ints_disable(priv);
1175
1176 /* Configure the coalescing support */
1177 gfar_configure_coalescing_all(priv);
1178}
1179
1180static void gfar_hw_init(struct gfar_private *priv)
1181{
1182 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1183 u32 attrs;
1184
1185 /* Stop the DMA engine now, in case it was running before
1186 * (The firmware could have used it, and left it running).
1187 */
1188 gfar_halt(priv);
1189
1190 gfar_mac_reset(priv);
1191
1192 /* Zero out the rmon mib registers if it has them */
1193 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1194 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1195
1196 /* Mask off the CAM interrupts */
1197 gfar_write(&regs->rmon.cam1, 0xffffffff);
1198 gfar_write(&regs->rmon.cam2, 0xffffffff);
1199 }
1200
1068 /* Initialize ECNTRL */ 1201 /* Initialize ECNTRL */
1069 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 1202 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1070 1203
1071 /* Set the dev->base_addr to the gfar reg region */ 1204 /* Set the extraction length and index */
1072 dev->base_addr = (unsigned long) regs; 1205 attrs = ATTRELI_EL(priv->rx_stash_size) |
1206 ATTRELI_EI(priv->rx_stash_index);
1073 1207
1074 /* Fill in the dev structure */ 1208 gfar_write(&regs->attreli, attrs);
1075 dev->watchdog_timeo = TX_TIMEOUT;
1076 dev->mtu = 1500;
1077 dev->netdev_ops = &gfar_netdev_ops;
1078 dev->ethtool_ops = &gfar_ethtool_ops;
1079 1209
1080 /* Register for napi ...We are registering NAPI for each grp */ 1210 /* Start with defaults, and add stashing
1081 if (priv->mode == SQ_SG_MODE) 1211 * depending on driver parameters
1082 netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, 1212 */
1083 GFAR_DEV_WEIGHT); 1213 attrs = ATTR_INIT_SETTINGS;
1084 else
1085 for (i = 0; i < priv->num_grps; i++)
1086 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1087 GFAR_DEV_WEIGHT);
1088 1214
1089 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1215 if (priv->bd_stash_en)
1090 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1216 attrs |= ATTR_BDSTASH;
1091 NETIF_F_RXCSUM;
1092 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1093 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1094 }
1095 1217
1096 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1218 if (priv->rx_stash_size != 0)
1097 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1219 attrs |= ATTR_BUFSTASH;
1098 NETIF_F_HW_VLAN_CTAG_RX; 1220
1099 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1221 gfar_write(&regs->attr, attrs);
1100 } 1222
1223 /* FIFO configs */
1224 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1225 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1226 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1227
1228 /* Program the interrupt steering regs, only for MG devices */
1229 if (priv->num_grps > 1)
1230 gfar_write_isrg(priv);
1231}
1232
1233static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
1234{
1235 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1101 1236
1102 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1237 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1103 priv->extended_hash = 1; 1238 priv->extended_hash = 1;
@@ -1133,68 +1268,81 @@ static int gfar_probe(struct platform_device *ofdev)
1133 priv->hash_regs[6] = &regs->gaddr6; 1268 priv->hash_regs[6] = &regs->gaddr6;
1134 priv->hash_regs[7] = &regs->gaddr7; 1269 priv->hash_regs[7] = &regs->gaddr7;
1135 } 1270 }
1271}
1136 1272
1137 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 1273/* Set up the ethernet device structure, private data,
1138 priv->padding = DEFAULT_PADDING; 1274 * and anything else we need before we start
1139 else 1275 */
1140 priv->padding = 0; 1276static int gfar_probe(struct platform_device *ofdev)
1277{
1278 struct net_device *dev = NULL;
1279 struct gfar_private *priv = NULL;
1280 int err = 0, i;
1141 1281
1142 if (dev->features & NETIF_F_IP_CSUM || 1282 err = gfar_of_init(ofdev, &dev);
1143 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1144 dev->needed_headroom = GMAC_FCB_LEN;
1145 1283
1146 /* Program the isrg regs only if number of grps > 1 */ 1284 if (err)
1147 if (priv->num_grps > 1) { 1285 return err;
1148 baddr = &regs->isrg0; 1286
1149 for (i = 0; i < priv->num_grps; i++) { 1287 priv = netdev_priv(dev);
1150 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); 1288 priv->ndev = dev;
1151 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); 1289 priv->ofdev = ofdev;
1152 gfar_write(baddr, isrg); 1290 priv->dev = &ofdev->dev;
1153 baddr++; 1291 SET_NETDEV_DEV(dev, &ofdev->dev);
1154 isrg = 0x0; 1292
1293 spin_lock_init(&priv->bflock);
1294 INIT_WORK(&priv->reset_task, gfar_reset_task);
1295
1296 platform_set_drvdata(ofdev, priv);
1297
1298 gfar_detect_errata(priv);
1299
1300 /* Set the dev->base_addr to the gfar reg region */
1301 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1302
1303 /* Fill in the dev structure */
1304 dev->watchdog_timeo = TX_TIMEOUT;
1305 dev->mtu = 1500;
1306 dev->netdev_ops = &gfar_netdev_ops;
1307 dev->ethtool_ops = &gfar_ethtool_ops;
1308
1309 /* Register for napi ...We are registering NAPI for each grp */
1310 for (i = 0; i < priv->num_grps; i++) {
1311 if (priv->poll_mode == GFAR_SQ_POLLING) {
1312 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1313 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1314 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1315 gfar_poll_tx_sq, 2);
1316 } else {
1317 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1318 gfar_poll_rx, GFAR_DEV_WEIGHT);
1319 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1320 gfar_poll_tx, 2);
1155 } 1321 }
1156 } 1322 }
1157 1323
1158 /* Need to reverse the bit maps as bit_map's MSB is q0 1324 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1159 * but, for_each_set_bit parses from right to left, which 1325 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1160 * basically reverses the queue numbers 1326 NETIF_F_RXCSUM;
1161 */ 1327 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1162 for (i = 0; i< priv->num_grps; i++) { 1328 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1163 priv->gfargrp[i].tx_bit_map =
1164 reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1165 priv->gfargrp[i].rx_bit_map =
1166 reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1167 } 1329 }
1168 1330
1169 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1331 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1170 * also assign queues to groups 1332 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1171 */ 1333 NETIF_F_HW_VLAN_CTAG_RX;
1172 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1334 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1173 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1174
1175 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1176 priv->num_rx_queues) {
1177 priv->gfargrp[grp_idx].num_rx_queues++;
1178 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1179 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1180 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1181 }
1182 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1183
1184 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1185 priv->num_tx_queues) {
1186 priv->gfargrp[grp_idx].num_tx_queues++;
1187 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1188 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1189 tqueue = tqueue | (TQUEUE_EN0 >> i);
1190 }
1191 priv->gfargrp[grp_idx].rstat = rstat;
1192 priv->gfargrp[grp_idx].tstat = tstat;
1193 rstat = tstat =0;
1194 } 1335 }
1195 1336
1196 gfar_write(&regs->rqueue, rqueue); 1337 gfar_init_addr_hash_table(priv);
1197 gfar_write(&regs->tqueue, tqueue); 1338
1339 /* Insert receive time stamps into padding alignment bytes */
1340 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1341 priv->padding = 8;
1342
1343 if (dev->features & NETIF_F_IP_CSUM ||
1344 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1345 dev->needed_headroom = GMAC_FCB_LEN;
1198 1346
1199 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1347 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1200 1348
@@ -1220,8 +1368,9 @@ static int gfar_probe(struct platform_device *ofdev)
1220 if (priv->num_tx_queues == 1) 1368 if (priv->num_tx_queues == 1)
1221 priv->prio_sched_en = 1; 1369 priv->prio_sched_en = 1;
1222 1370
1223 /* Carrier starts down, phylib will bring it up */ 1371 set_bit(GFAR_DOWN, &priv->state);
1224 netif_carrier_off(dev); 1372
1373 gfar_hw_init(priv);
1225 1374
1226 err = register_netdev(dev); 1375 err = register_netdev(dev);
1227 1376
@@ -1230,6 +1379,9 @@ static int gfar_probe(struct platform_device *ofdev)
1230 goto register_fail; 1379 goto register_fail;
1231 } 1380 }
1232 1381
1382 /* Carrier starts down, phylib will bring it up */
1383 netif_carrier_off(dev);
1384
1233 device_init_wakeup(&dev->dev, 1385 device_init_wakeup(&dev->dev,
1234 priv->device_flags & 1386 priv->device_flags &
1235 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1387 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1251,9 +1403,6 @@ static int gfar_probe(struct platform_device *ofdev)
1251 /* Initialize the filer table */ 1403 /* Initialize the filer table */
1252 gfar_init_filer_table(priv); 1404 gfar_init_filer_table(priv);
1253 1405
1254 /* Create all the sysfs files */
1255 gfar_init_sysfs(dev);
1256
1257 /* Print out the device info */ 1406 /* Print out the device info */
1258 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1407 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1259 1408
@@ -1272,8 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
1272 1421
1273register_fail: 1422register_fail:
1274 unmap_group_regs(priv); 1423 unmap_group_regs(priv);
1275 free_tx_pointers(priv); 1424 gfar_free_rx_queues(priv);
1276 free_rx_pointers(priv); 1425 gfar_free_tx_queues(priv);
1277 if (priv->phy_node) 1426 if (priv->phy_node)
1278 of_node_put(priv->phy_node); 1427 of_node_put(priv->phy_node);
1279 if (priv->tbi_node) 1428 if (priv->tbi_node)
@@ -1293,6 +1442,8 @@ static int gfar_remove(struct platform_device *ofdev)
1293 1442
1294 unregister_netdev(priv->ndev); 1443 unregister_netdev(priv->ndev);
1295 unmap_group_regs(priv); 1444 unmap_group_regs(priv);
1445 gfar_free_rx_queues(priv);
1446 gfar_free_tx_queues(priv);
1296 free_gfar_dev(priv); 1447 free_gfar_dev(priv);
1297 1448
1298 return 0; 1449 return 0;
@@ -1318,9 +1469,8 @@ static int gfar_suspend(struct device *dev)
1318 1469
1319 local_irq_save(flags); 1470 local_irq_save(flags);
1320 lock_tx_qs(priv); 1471 lock_tx_qs(priv);
1321 lock_rx_qs(priv);
1322 1472
1323 gfar_halt_nodisable(ndev); 1473 gfar_halt_nodisable(priv);
1324 1474
1325 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1475 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1326 tempval = gfar_read(&regs->maccfg1); 1476 tempval = gfar_read(&regs->maccfg1);
@@ -1332,7 +1482,6 @@ static int gfar_suspend(struct device *dev)
1332 1482
1333 gfar_write(&regs->maccfg1, tempval); 1483 gfar_write(&regs->maccfg1, tempval);
1334 1484
1335 unlock_rx_qs(priv);
1336 unlock_tx_qs(priv); 1485 unlock_tx_qs(priv);
1337 local_irq_restore(flags); 1486 local_irq_restore(flags);
1338 1487
@@ -1378,15 +1527,13 @@ static int gfar_resume(struct device *dev)
1378 */ 1527 */
1379 local_irq_save(flags); 1528 local_irq_save(flags);
1380 lock_tx_qs(priv); 1529 lock_tx_qs(priv);
1381 lock_rx_qs(priv);
1382 1530
1383 tempval = gfar_read(&regs->maccfg2); 1531 tempval = gfar_read(&regs->maccfg2);
1384 tempval &= ~MACCFG2_MPEN; 1532 tempval &= ~MACCFG2_MPEN;
1385 gfar_write(&regs->maccfg2, tempval); 1533 gfar_write(&regs->maccfg2, tempval);
1386 1534
1387 gfar_start(ndev); 1535 gfar_start(priv);
1388 1536
1389 unlock_rx_qs(priv);
1390 unlock_tx_qs(priv); 1537 unlock_tx_qs(priv);
1391 local_irq_restore(flags); 1538 local_irq_restore(flags);
1392 1539
@@ -1413,10 +1560,11 @@ static int gfar_restore(struct device *dev)
1413 return -ENOMEM; 1560 return -ENOMEM;
1414 } 1561 }
1415 1562
1416 init_registers(ndev); 1563 gfar_mac_reset(priv);
1417 gfar_set_mac_address(ndev); 1564
1418 gfar_init_mac(ndev); 1565 gfar_init_tx_rx_base(priv);
1419 gfar_start(ndev); 1566
1567 gfar_start(priv);
1420 1568
1421 priv->oldlink = 0; 1569 priv->oldlink = 0;
1422 priv->oldspeed = 0; 1570 priv->oldspeed = 0;
@@ -1574,57 +1722,6 @@ static void gfar_configure_serdes(struct net_device *dev)
1574 BMCR_SPEED1000); 1722 BMCR_SPEED1000);
1575} 1723}
1576 1724
1577static void init_registers(struct net_device *dev)
1578{
1579 struct gfar_private *priv = netdev_priv(dev);
1580 struct gfar __iomem *regs = NULL;
1581 int i;
1582
1583 for (i = 0; i < priv->num_grps; i++) {
1584 regs = priv->gfargrp[i].regs;
1585 /* Clear IEVENT */
1586 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1587
1588 /* Initialize IMASK */
1589 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1590 }
1591
1592 regs = priv->gfargrp[0].regs;
1593 /* Init hash registers to zero */
1594 gfar_write(&regs->igaddr0, 0);
1595 gfar_write(&regs->igaddr1, 0);
1596 gfar_write(&regs->igaddr2, 0);
1597 gfar_write(&regs->igaddr3, 0);
1598 gfar_write(&regs->igaddr4, 0);
1599 gfar_write(&regs->igaddr5, 0);
1600 gfar_write(&regs->igaddr6, 0);
1601 gfar_write(&regs->igaddr7, 0);
1602
1603 gfar_write(&regs->gaddr0, 0);
1604 gfar_write(&regs->gaddr1, 0);
1605 gfar_write(&regs->gaddr2, 0);
1606 gfar_write(&regs->gaddr3, 0);
1607 gfar_write(&regs->gaddr4, 0);
1608 gfar_write(&regs->gaddr5, 0);
1609 gfar_write(&regs->gaddr6, 0);
1610 gfar_write(&regs->gaddr7, 0);
1611
1612 /* Zero out the rmon mib registers if it has them */
1613 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1614 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1615
1616 /* Mask off the CAM interrupts */
1617 gfar_write(&regs->rmon.cam1, 0xffffffff);
1618 gfar_write(&regs->rmon.cam2, 0xffffffff);
1619 }
1620
1621 /* Initialize the max receive buffer length */
1622 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1623
1624 /* Initialize the Minimum Frame Length Register */
1625 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1626}
1627
1628static int __gfar_is_rx_idle(struct gfar_private *priv) 1725static int __gfar_is_rx_idle(struct gfar_private *priv)
1629{ 1726{
1630 u32 res; 1727 u32 res;
@@ -1648,23 +1745,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1648} 1745}
1649 1746
1650/* Halt the receive and transmit queues */ 1747/* Halt the receive and transmit queues */
1651static void gfar_halt_nodisable(struct net_device *dev) 1748static void gfar_halt_nodisable(struct gfar_private *priv)
1652{ 1749{
1653 struct gfar_private *priv = netdev_priv(dev); 1750 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1654 struct gfar __iomem *regs = NULL;
1655 u32 tempval; 1751 u32 tempval;
1656 int i;
1657
1658 for (i = 0; i < priv->num_grps; i++) {
1659 regs = priv->gfargrp[i].regs;
1660 /* Mask all interrupts */
1661 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1662 1752
1663 /* Clear all interrupts */ 1753 gfar_ints_disable(priv);
1664 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1665 }
1666 1754
1667 regs = priv->gfargrp[0].regs;
1668 /* Stop the DMA, and wait for it to stop */ 1755 /* Stop the DMA, and wait for it to stop */
1669 tempval = gfar_read(&regs->dmactrl); 1756 tempval = gfar_read(&regs->dmactrl);
1670 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != 1757 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,56 +1772,41 @@ static void gfar_halt_nodisable(struct net_device *dev)
1685} 1772}
1686 1773
1687/* Halt the receive and transmit queues */ 1774/* Halt the receive and transmit queues */
1688void gfar_halt(struct net_device *dev) 1775void gfar_halt(struct gfar_private *priv)
1689{ 1776{
1690 struct gfar_private *priv = netdev_priv(dev);
1691 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1777 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1692 u32 tempval; 1778 u32 tempval;
1693 1779
1694 gfar_halt_nodisable(dev); 1780 /* Dissable the Rx/Tx hw queues */
1781 gfar_write(&regs->rqueue, 0);
1782 gfar_write(&regs->tqueue, 0);
1695 1783
1696 /* Disable Rx and Tx */ 1784 mdelay(10);
1785
1786 gfar_halt_nodisable(priv);
1787
1788 /* Disable Rx/Tx DMA */
1697 tempval = gfar_read(&regs->maccfg1); 1789 tempval = gfar_read(&regs->maccfg1);
1698 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1790 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1699 gfar_write(&regs->maccfg1, tempval); 1791 gfar_write(&regs->maccfg1, tempval);
1700} 1792}
1701 1793
1702static void free_grp_irqs(struct gfar_priv_grp *grp)
1703{
1704 free_irq(gfar_irq(grp, TX)->irq, grp);
1705 free_irq(gfar_irq(grp, RX)->irq, grp);
1706 free_irq(gfar_irq(grp, ER)->irq, grp);
1707}
1708
1709void stop_gfar(struct net_device *dev) 1794void stop_gfar(struct net_device *dev)
1710{ 1795{
1711 struct gfar_private *priv = netdev_priv(dev); 1796 struct gfar_private *priv = netdev_priv(dev);
1712 unsigned long flags;
1713 int i;
1714
1715 phy_stop(priv->phydev);
1716 1797
1798 netif_tx_stop_all_queues(dev);
1717 1799
1718 /* Lock it down */ 1800 smp_mb__before_clear_bit();
1719 local_irq_save(flags); 1801 set_bit(GFAR_DOWN, &priv->state);
1720 lock_tx_qs(priv); 1802 smp_mb__after_clear_bit();
1721 lock_rx_qs(priv);
1722 1803
1723 gfar_halt(dev); 1804 disable_napi(priv);
1724 1805
1725 unlock_rx_qs(priv); 1806 /* disable ints and gracefully shut down Rx/Tx DMA */
1726 unlock_tx_qs(priv); 1807 gfar_halt(priv);
1727 local_irq_restore(flags);
1728 1808
1729 /* Free the IRQs */ 1809 phy_stop(priv->phydev);
1730 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1731 for (i = 0; i < priv->num_grps; i++)
1732 free_grp_irqs(&priv->gfargrp[i]);
1733 } else {
1734 for (i = 0; i < priv->num_grps; i++)
1735 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
1736 &priv->gfargrp[i]);
1737 }
1738 1810
1739 free_skb_resources(priv); 1811 free_skb_resources(priv);
1740} 1812}
@@ -1825,17 +1897,15 @@ static void free_skb_resources(struct gfar_private *priv)
1825 priv->tx_queue[0]->tx_bd_dma_base); 1897 priv->tx_queue[0]->tx_bd_dma_base);
1826} 1898}
1827 1899
1828void gfar_start(struct net_device *dev) 1900void gfar_start(struct gfar_private *priv)
1829{ 1901{
1830 struct gfar_private *priv = netdev_priv(dev);
1831 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1902 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1832 u32 tempval; 1903 u32 tempval;
1833 int i = 0; 1904 int i = 0;
1834 1905
1835 /* Enable Rx and Tx in MACCFG1 */ 1906 /* Enable Rx/Tx hw queues */
1836 tempval = gfar_read(&regs->maccfg1); 1907 gfar_write(&regs->rqueue, priv->rqueue);
1837 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1908 gfar_write(&regs->tqueue, priv->tqueue);
1838 gfar_write(&regs->maccfg1, tempval);
1839 1909
1840 /* Initialize DMACTRL to have WWR and WOP */ 1910 /* Initialize DMACTRL to have WWR and WOP */
1841 tempval = gfar_read(&regs->dmactrl); 1911 tempval = gfar_read(&regs->dmactrl);
@@ -1852,52 +1922,23 @@ void gfar_start(struct net_device *dev)
1852 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1922 /* Clear THLT/RHLT, so that the DMA starts polling now */
1853 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); 1923 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1854 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); 1924 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1855 /* Unmask the interrupts we look for */
1856 gfar_write(&regs->imask, IMASK_DEFAULT);
1857 } 1925 }
1858 1926
1859 dev->trans_start = jiffies; /* prevent tx timeout */ 1927 /* Enable Rx/Tx DMA */
1860} 1928 tempval = gfar_read(&regs->maccfg1);
1861 1929 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1862static void gfar_configure_coalescing(struct gfar_private *priv, 1930 gfar_write(&regs->maccfg1, tempval);
1863 unsigned long tx_mask, unsigned long rx_mask)
1864{
1865 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1866 u32 __iomem *baddr;
1867
1868 if (priv->mode == MQ_MG_MODE) {
1869 int i = 0;
1870
1871 baddr = &regs->txic0;
1872 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1873 gfar_write(baddr + i, 0);
1874 if (likely(priv->tx_queue[i]->txcoalescing))
1875 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1876 }
1877 1931
1878 baddr = &regs->rxic0; 1932 gfar_ints_enable(priv);
1879 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1880 gfar_write(baddr + i, 0);
1881 if (likely(priv->rx_queue[i]->rxcoalescing))
1882 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1883 }
1884 } else {
1885 /* Backward compatible case -- even if we enable
1886 * multiple queues, there's only single reg to program
1887 */
1888 gfar_write(&regs->txic, 0);
1889 if (likely(priv->tx_queue[0]->txcoalescing))
1890 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1891 1933
1892 gfar_write(&regs->rxic, 0); 1934 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1893 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1894 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1895 }
1896} 1935}
1897 1936
1898void gfar_configure_coalescing_all(struct gfar_private *priv) 1937static void free_grp_irqs(struct gfar_priv_grp *grp)
1899{ 1938{
1900 gfar_configure_coalescing(priv, 0xFF, 0xFF); 1939 free_irq(gfar_irq(grp, TX)->irq, grp);
1940 free_irq(gfar_irq(grp, RX)->irq, grp);
1941 free_irq(gfar_irq(grp, ER)->irq, grp);
1901} 1942}
1902 1943
1903static int register_grp_irqs(struct gfar_priv_grp *grp) 1944static int register_grp_irqs(struct gfar_priv_grp *grp)
@@ -1956,46 +1997,65 @@ err_irq_fail:
1956 1997
1957} 1998}
1958 1999
1959/* Bring the controller up and running */ 2000static void gfar_free_irq(struct gfar_private *priv)
1960int startup_gfar(struct net_device *ndev)
1961{ 2001{
1962 struct gfar_private *priv = netdev_priv(ndev); 2002 int i;
1963 struct gfar __iomem *regs = NULL;
1964 int err, i, j;
1965 2003
1966 for (i = 0; i < priv->num_grps; i++) { 2004 /* Free the IRQs */
1967 regs= priv->gfargrp[i].regs; 2005 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1968 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 2006 for (i = 0; i < priv->num_grps; i++)
2007 free_grp_irqs(&priv->gfargrp[i]);
2008 } else {
2009 for (i = 0; i < priv->num_grps; i++)
2010 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2011 &priv->gfargrp[i]);
1969 } 2012 }
2013}
1970 2014
1971 regs= priv->gfargrp[0].regs; 2015static int gfar_request_irq(struct gfar_private *priv)
1972 err = gfar_alloc_skb_resources(ndev); 2016{
1973 if (err) 2017 int err, i, j;
1974 return err;
1975
1976 gfar_init_mac(ndev);
1977 2018
1978 for (i = 0; i < priv->num_grps; i++) { 2019 for (i = 0; i < priv->num_grps; i++) {
1979 err = register_grp_irqs(&priv->gfargrp[i]); 2020 err = register_grp_irqs(&priv->gfargrp[i]);
1980 if (err) { 2021 if (err) {
1981 for (j = 0; j < i; j++) 2022 for (j = 0; j < i; j++)
1982 free_grp_irqs(&priv->gfargrp[j]); 2023 free_grp_irqs(&priv->gfargrp[j]);
1983 goto irq_fail; 2024 return err;
1984 } 2025 }
1985 } 2026 }
1986 2027
1987 /* Start the controller */ 2028 return 0;
1988 gfar_start(ndev); 2029}
2030
2031/* Bring the controller up and running */
2032int startup_gfar(struct net_device *ndev)
2033{
2034 struct gfar_private *priv = netdev_priv(ndev);
2035 int err;
2036
2037 gfar_mac_reset(priv);
2038
2039 err = gfar_alloc_skb_resources(ndev);
2040 if (err)
2041 return err;
2042
2043 gfar_init_tx_rx_base(priv);
2044
2045 smp_mb__before_clear_bit();
2046 clear_bit(GFAR_DOWN, &priv->state);
2047 smp_mb__after_clear_bit();
2048
2049 /* Start Rx/Tx DMA and enable the interrupts */
2050 gfar_start(priv);
1989 2051
1990 phy_start(priv->phydev); 2052 phy_start(priv->phydev);
1991 2053
1992 gfar_configure_coalescing_all(priv); 2054 enable_napi(priv);
1993 2055
1994 return 0; 2056 netif_tx_wake_all_queues(ndev);
1995 2057
1996irq_fail: 2058 return 0;
1997 free_skb_resources(priv);
1998 return err;
1999} 2059}
2000 2060
2001/* Called when something needs to use the ethernet device 2061/* Called when something needs to use the ethernet device
@@ -2006,27 +2066,17 @@ static int gfar_enet_open(struct net_device *dev)
2006 struct gfar_private *priv = netdev_priv(dev); 2066 struct gfar_private *priv = netdev_priv(dev);
2007 int err; 2067 int err;
2008 2068
2009 enable_napi(priv);
2010
2011 /* Initialize a bunch of registers */
2012 init_registers(dev);
2013
2014 gfar_set_mac_address(dev);
2015
2016 err = init_phy(dev); 2069 err = init_phy(dev);
2070 if (err)
2071 return err;
2017 2072
2018 if (err) { 2073 err = gfar_request_irq(priv);
2019 disable_napi(priv); 2074 if (err)
2020 return err; 2075 return err;
2021 }
2022 2076
2023 err = startup_gfar(dev); 2077 err = startup_gfar(dev);
2024 if (err) { 2078 if (err)
2025 disable_napi(priv);
2026 return err; 2079 return err;
2027 }
2028
2029 netif_tx_start_all_queues(dev);
2030 2080
2031 device_set_wakeup_enable(&dev->dev, priv->wol_en); 2081 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2032 2082
@@ -2152,13 +2202,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2152 skb_new = skb_realloc_headroom(skb, fcb_len); 2202 skb_new = skb_realloc_headroom(skb, fcb_len);
2153 if (!skb_new) { 2203 if (!skb_new) {
2154 dev->stats.tx_errors++; 2204 dev->stats.tx_errors++;
2155 kfree_skb(skb); 2205 dev_kfree_skb_any(skb);
2156 return NETDEV_TX_OK; 2206 return NETDEV_TX_OK;
2157 } 2207 }
2158 2208
2159 if (skb->sk) 2209 if (skb->sk)
2160 skb_set_owner_w(skb_new, skb->sk); 2210 skb_set_owner_w(skb_new, skb->sk);
2161 consume_skb(skb); 2211 dev_consume_skb_any(skb);
2162 skb = skb_new; 2212 skb = skb_new;
2163 } 2213 }
2164 2214
@@ -2351,8 +2401,6 @@ static int gfar_close(struct net_device *dev)
2351{ 2401{
2352 struct gfar_private *priv = netdev_priv(dev); 2402 struct gfar_private *priv = netdev_priv(dev);
2353 2403
2354 disable_napi(priv);
2355
2356 cancel_work_sync(&priv->reset_task); 2404 cancel_work_sync(&priv->reset_task);
2357 stop_gfar(dev); 2405 stop_gfar(dev);
2358 2406
@@ -2360,7 +2408,7 @@ static int gfar_close(struct net_device *dev)
2360 phy_disconnect(priv->phydev); 2408 phy_disconnect(priv->phydev);
2361 priv->phydev = NULL; 2409 priv->phydev = NULL;
2362 2410
2363 netif_tx_stop_all_queues(dev); 2411 gfar_free_irq(priv);
2364 2412
2365 return 0; 2413 return 0;
2366} 2414}
@@ -2373,77 +2421,9 @@ static int gfar_set_mac_address(struct net_device *dev)
2373 return 0; 2421 return 0;
2374} 2422}
2375 2423
2376/* Check if rx parser should be activated */
2377void gfar_check_rx_parser_mode(struct gfar_private *priv)
2378{
2379 struct gfar __iomem *regs;
2380 u32 tempval;
2381
2382 regs = priv->gfargrp[0].regs;
2383
2384 tempval = gfar_read(&regs->rctrl);
2385 /* If parse is no longer required, then disable parser */
2386 if (tempval & RCTRL_REQ_PARSER) {
2387 tempval |= RCTRL_PRSDEP_INIT;
2388 priv->uses_rxfcb = 1;
2389 } else {
2390 tempval &= ~RCTRL_PRSDEP_INIT;
2391 priv->uses_rxfcb = 0;
2392 }
2393 gfar_write(&regs->rctrl, tempval);
2394}
2395
2396/* Enables and disables VLAN insertion/extraction */
2397void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2398{
2399 struct gfar_private *priv = netdev_priv(dev);
2400 struct gfar __iomem *regs = NULL;
2401 unsigned long flags;
2402 u32 tempval;
2403
2404 regs = priv->gfargrp[0].regs;
2405 local_irq_save(flags);
2406 lock_rx_qs(priv);
2407
2408 if (features & NETIF_F_HW_VLAN_CTAG_TX) {
2409 /* Enable VLAN tag insertion */
2410 tempval = gfar_read(&regs->tctrl);
2411 tempval |= TCTRL_VLINS;
2412 gfar_write(&regs->tctrl, tempval);
2413 } else {
2414 /* Disable VLAN tag insertion */
2415 tempval = gfar_read(&regs->tctrl);
2416 tempval &= ~TCTRL_VLINS;
2417 gfar_write(&regs->tctrl, tempval);
2418 }
2419
2420 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2421 /* Enable VLAN tag extraction */
2422 tempval = gfar_read(&regs->rctrl);
2423 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2424 gfar_write(&regs->rctrl, tempval);
2425 priv->uses_rxfcb = 1;
2426 } else {
2427 /* Disable VLAN tag extraction */
2428 tempval = gfar_read(&regs->rctrl);
2429 tempval &= ~RCTRL_VLEX;
2430 gfar_write(&regs->rctrl, tempval);
2431
2432 gfar_check_rx_parser_mode(priv);
2433 }
2434
2435 gfar_change_mtu(dev, dev->mtu);
2436
2437 unlock_rx_qs(priv);
2438 local_irq_restore(flags);
2439}
2440
2441static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2424static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2442{ 2425{
2443 int tempsize, tempval;
2444 struct gfar_private *priv = netdev_priv(dev); 2426 struct gfar_private *priv = netdev_priv(dev);
2445 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2446 int oldsize = priv->rx_buffer_size;
2447 int frame_size = new_mtu + ETH_HLEN; 2427 int frame_size = new_mtu + ETH_HLEN;
2448 2428
2449 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 2429 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
@@ -2451,45 +2431,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2451 return -EINVAL; 2431 return -EINVAL;
2452 } 2432 }
2453 2433
2454 if (priv->uses_rxfcb) 2434 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2455 frame_size += GMAC_FCB_LEN; 2435 cpu_relax();
2456 2436
2457 frame_size += priv->padding; 2437 if (dev->flags & IFF_UP)
2458
2459 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2460 INCREMENTAL_BUFFER_SIZE;
2461
2462 /* Only stop and start the controller if it isn't already
2463 * stopped, and we changed something
2464 */
2465 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2466 stop_gfar(dev); 2438 stop_gfar(dev);
2467 2439
2468 priv->rx_buffer_size = tempsize;
2469
2470 dev->mtu = new_mtu; 2440 dev->mtu = new_mtu;
2471 2441
2472 gfar_write(&regs->mrblr, priv->rx_buffer_size); 2442 if (dev->flags & IFF_UP)
2473 gfar_write(&regs->maxfrm, priv->rx_buffer_size); 2443 startup_gfar(dev);
2474 2444
2475 /* If the mtu is larger than the max size for standard 2445 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2476 * ethernet frames (ie, a jumbo frame), then set maccfg2
2477 * to allow huge frames, and to check the length
2478 */
2479 tempval = gfar_read(&regs->maccfg2);
2480 2446
2481 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 2447 return 0;
2482 gfar_has_errata(priv, GFAR_ERRATA_74)) 2448}
2483 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2484 else
2485 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2486 2449
2487 gfar_write(&regs->maccfg2, tempval); 2450void reset_gfar(struct net_device *ndev)
2451{
2452 struct gfar_private *priv = netdev_priv(ndev);
2488 2453
2489 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2454 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2490 startup_gfar(dev); 2455 cpu_relax();
2491 2456
2492 return 0; 2457 stop_gfar(ndev);
2458 startup_gfar(ndev);
2459
2460 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2493} 2461}
2494 2462
2495/* gfar_reset_task gets scheduled when a packet has not been 2463/* gfar_reset_task gets scheduled when a packet has not been
@@ -2501,16 +2469,7 @@ static void gfar_reset_task(struct work_struct *work)
2501{ 2469{
2502 struct gfar_private *priv = container_of(work, struct gfar_private, 2470 struct gfar_private *priv = container_of(work, struct gfar_private,
2503 reset_task); 2471 reset_task);
2504 struct net_device *dev = priv->ndev; 2472 reset_gfar(priv->ndev);
2505
2506 if (dev->flags & IFF_UP) {
2507 netif_tx_stop_all_queues(dev);
2508 stop_gfar(dev);
2509 startup_gfar(dev);
2510 netif_tx_start_all_queues(dev);
2511 }
2512
2513 netif_tx_schedule_all(dev);
2514} 2473}
2515 2474
2516static void gfar_timeout(struct net_device *dev) 2475static void gfar_timeout(struct net_device *dev)
@@ -2623,8 +2582,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2623 } 2582 }
2624 2583
2625 /* If we freed a buffer, we can restart transmission, if necessary */ 2584 /* If we freed a buffer, we can restart transmission, if necessary */
2626 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) 2585 if (tx_queue->num_txbdfree &&
2627 netif_wake_subqueue(dev, tqi); 2586 netif_tx_queue_stopped(txq) &&
2587 !(test_bit(GFAR_DOWN, &priv->state)))
2588 netif_wake_subqueue(priv->ndev, tqi);
2628 2589
2629 /* Update dirty indicators */ 2590 /* Update dirty indicators */
2630 tx_queue->skb_dirtytx = skb_dirtytx; 2591 tx_queue->skb_dirtytx = skb_dirtytx;
@@ -2633,31 +2594,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2633 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2594 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2634} 2595}
2635 2596
2636static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2637{
2638 unsigned long flags;
2639
2640 spin_lock_irqsave(&gfargrp->grplock, flags);
2641 if (napi_schedule_prep(&gfargrp->napi)) {
2642 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2643 __napi_schedule(&gfargrp->napi);
2644 } else {
2645 /* Clear IEVENT, so interrupts aren't called again
2646 * because of the packets that have already arrived.
2647 */
2648 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2649 }
2650 spin_unlock_irqrestore(&gfargrp->grplock, flags);
2651
2652}
2653
2654/* Interrupt Handler for Transmit complete */
2655static irqreturn_t gfar_transmit(int irq, void *grp_id)
2656{
2657 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2658 return IRQ_HANDLED;
2659}
2660
2661static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2597static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2662 struct sk_buff *skb) 2598 struct sk_buff *skb)
2663{ 2599{
@@ -2728,7 +2664,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
2728 2664
2729irqreturn_t gfar_receive(int irq, void *grp_id) 2665irqreturn_t gfar_receive(int irq, void *grp_id)
2730{ 2666{
2731 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2667 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2668 unsigned long flags;
2669 u32 imask;
2670
2671 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2672 spin_lock_irqsave(&grp->grplock, flags);
2673 imask = gfar_read(&grp->regs->imask);
2674 imask &= IMASK_RX_DISABLED;
2675 gfar_write(&grp->regs->imask, imask);
2676 spin_unlock_irqrestore(&grp->grplock, flags);
2677 __napi_schedule(&grp->napi_rx);
2678 } else {
2679 /* Clear IEVENT, so interrupts aren't called again
2680 * because of the packets that have already arrived.
2681 */
2682 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2683 }
2684
2685 return IRQ_HANDLED;
2686}
2687
2688/* Interrupt Handler for Transmit complete */
2689static irqreturn_t gfar_transmit(int irq, void *grp_id)
2690{
2691 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2692 unsigned long flags;
2693 u32 imask;
2694
2695 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2696 spin_lock_irqsave(&grp->grplock, flags);
2697 imask = gfar_read(&grp->regs->imask);
2698 imask &= IMASK_TX_DISABLED;
2699 gfar_write(&grp->regs->imask, imask);
2700 spin_unlock_irqrestore(&grp->grplock, flags);
2701 __napi_schedule(&grp->napi_tx);
2702 } else {
2703 /* Clear IEVENT, so interrupts aren't called again
2704 * because of the packets that have already arrived.
2705 */
2706 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2707 }
2708
2732 return IRQ_HANDLED; 2709 return IRQ_HANDLED;
2733} 2710}
2734 2711
@@ -2852,7 +2829,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2852 rx_queue->stats.rx_bytes += pkt_len; 2829 rx_queue->stats.rx_bytes += pkt_len;
2853 skb_record_rx_queue(skb, rx_queue->qindex); 2830 skb_record_rx_queue(skb, rx_queue->qindex);
2854 gfar_process_frame(dev, skb, amount_pull, 2831 gfar_process_frame(dev, skb, amount_pull,
2855 &rx_queue->grp->napi); 2832 &rx_queue->grp->napi_rx);
2856 2833
2857 } else { 2834 } else {
2858 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2835 netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2881,66 +2858,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2881 return howmany; 2858 return howmany;
2882} 2859}
2883 2860
2884static int gfar_poll_sq(struct napi_struct *napi, int budget) 2861static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2885{ 2862{
2886 struct gfar_priv_grp *gfargrp = 2863 struct gfar_priv_grp *gfargrp =
2887 container_of(napi, struct gfar_priv_grp, napi); 2864 container_of(napi, struct gfar_priv_grp, napi_rx);
2888 struct gfar __iomem *regs = gfargrp->regs; 2865 struct gfar __iomem *regs = gfargrp->regs;
2889 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; 2866 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2890 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
2891 int work_done = 0; 2867 int work_done = 0;
2892 2868
2893 /* Clear IEVENT, so interrupts aren't called again 2869 /* Clear IEVENT, so interrupts aren't called again
2894 * because of the packets that have already arrived 2870 * because of the packets that have already arrived
2895 */ 2871 */
2896 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2872 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2897
2898 /* run Tx cleanup to completion */
2899 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2900 gfar_clean_tx_ring(tx_queue);
2901 2873
2902 work_done = gfar_clean_rx_ring(rx_queue, budget); 2874 work_done = gfar_clean_rx_ring(rx_queue, budget);
2903 2875
2904 if (work_done < budget) { 2876 if (work_done < budget) {
2877 u32 imask;
2905 napi_complete(napi); 2878 napi_complete(napi);
2906 /* Clear the halt bit in RSTAT */ 2879 /* Clear the halt bit in RSTAT */
2907 gfar_write(&regs->rstat, gfargrp->rstat); 2880 gfar_write(&regs->rstat, gfargrp->rstat);
2908 2881
2909 gfar_write(&regs->imask, IMASK_DEFAULT); 2882 spin_lock_irq(&gfargrp->grplock);
2910 2883 imask = gfar_read(&regs->imask);
2911 /* If we are coalescing interrupts, update the timer 2884 imask |= IMASK_RX_DEFAULT;
2912 * Otherwise, clear it 2885 gfar_write(&regs->imask, imask);
2913 */ 2886 spin_unlock_irq(&gfargrp->grplock);
2914 gfar_write(&regs->txic, 0);
2915 if (likely(tx_queue->txcoalescing))
2916 gfar_write(&regs->txic, tx_queue->txic);
2917
2918 gfar_write(&regs->rxic, 0);
2919 if (unlikely(rx_queue->rxcoalescing))
2920 gfar_write(&regs->rxic, rx_queue->rxic);
2921 } 2887 }
2922 2888
2923 return work_done; 2889 return work_done;
2924} 2890}
2925 2891
2926static int gfar_poll(struct napi_struct *napi, int budget) 2892static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2893{
2894 struct gfar_priv_grp *gfargrp =
2895 container_of(napi, struct gfar_priv_grp, napi_tx);
2896 struct gfar __iomem *regs = gfargrp->regs;
2897 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2898 u32 imask;
2899
2900 /* Clear IEVENT, so interrupts aren't called again
2901 * because of the packets that have already arrived
2902 */
2903 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2904
2905 /* run Tx cleanup to completion */
2906 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2907 gfar_clean_tx_ring(tx_queue);
2908
2909 napi_complete(napi);
2910
2911 spin_lock_irq(&gfargrp->grplock);
2912 imask = gfar_read(&regs->imask);
2913 imask |= IMASK_TX_DEFAULT;
2914 gfar_write(&regs->imask, imask);
2915 spin_unlock_irq(&gfargrp->grplock);
2916
2917 return 0;
2918}
2919
2920static int gfar_poll_rx(struct napi_struct *napi, int budget)
2927{ 2921{
2928 struct gfar_priv_grp *gfargrp = 2922 struct gfar_priv_grp *gfargrp =
2929 container_of(napi, struct gfar_priv_grp, napi); 2923 container_of(napi, struct gfar_priv_grp, napi_rx);
2930 struct gfar_private *priv = gfargrp->priv; 2924 struct gfar_private *priv = gfargrp->priv;
2931 struct gfar __iomem *regs = gfargrp->regs; 2925 struct gfar __iomem *regs = gfargrp->regs;
2932 struct gfar_priv_tx_q *tx_queue = NULL;
2933 struct gfar_priv_rx_q *rx_queue = NULL; 2926 struct gfar_priv_rx_q *rx_queue = NULL;
2934 int work_done = 0, work_done_per_q = 0; 2927 int work_done = 0, work_done_per_q = 0;
2935 int i, budget_per_q = 0; 2928 int i, budget_per_q = 0;
2936 int has_tx_work = 0;
2937 unsigned long rstat_rxf; 2929 unsigned long rstat_rxf;
2938 int num_act_queues; 2930 int num_act_queues;
2939 2931
2940 /* Clear IEVENT, so interrupts aren't called again 2932 /* Clear IEVENT, so interrupts aren't called again
2941 * because of the packets that have already arrived 2933 * because of the packets that have already arrived
2942 */ 2934 */
2943 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2935 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2944 2936
2945 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; 2937 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2946 2938
@@ -2948,15 +2940,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2948 if (num_act_queues) 2940 if (num_act_queues)
2949 budget_per_q = budget/num_act_queues; 2941 budget_per_q = budget/num_act_queues;
2950 2942
2951 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2952 tx_queue = priv->tx_queue[i];
2953 /* run Tx cleanup to completion */
2954 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2955 gfar_clean_tx_ring(tx_queue);
2956 has_tx_work = 1;
2957 }
2958 }
2959
2960 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2943 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2961 /* skip queue if not active */ 2944 /* skip queue if not active */
2962 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 2945 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
@@ -2979,25 +2962,62 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2979 } 2962 }
2980 } 2963 }
2981 2964
2982 if (!num_act_queues && !has_tx_work) { 2965 if (!num_act_queues) {
2983 2966 u32 imask;
2984 napi_complete(napi); 2967 napi_complete(napi);
2985 2968
2986 /* Clear the halt bit in RSTAT */ 2969 /* Clear the halt bit in RSTAT */
2987 gfar_write(&regs->rstat, gfargrp->rstat); 2970 gfar_write(&regs->rstat, gfargrp->rstat);
2988 2971
2989 gfar_write(&regs->imask, IMASK_DEFAULT); 2972 spin_lock_irq(&gfargrp->grplock);
2990 2973 imask = gfar_read(&regs->imask);
2991 /* If we are coalescing interrupts, update the timer 2974 imask |= IMASK_RX_DEFAULT;
2992 * Otherwise, clear it 2975 gfar_write(&regs->imask, imask);
2993 */ 2976 spin_unlock_irq(&gfargrp->grplock);
2994 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2995 gfargrp->tx_bit_map);
2996 } 2977 }
2997 2978
2998 return work_done; 2979 return work_done;
2999} 2980}
3000 2981
2982static int gfar_poll_tx(struct napi_struct *napi, int budget)
2983{
2984 struct gfar_priv_grp *gfargrp =
2985 container_of(napi, struct gfar_priv_grp, napi_tx);
2986 struct gfar_private *priv = gfargrp->priv;
2987 struct gfar __iomem *regs = gfargrp->regs;
2988 struct gfar_priv_tx_q *tx_queue = NULL;
2989 int has_tx_work = 0;
2990 int i;
2991
2992 /* Clear IEVENT, so interrupts aren't called again
2993 * because of the packets that have already arrived
2994 */
2995 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2996
2997 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2998 tx_queue = priv->tx_queue[i];
2999 /* run Tx cleanup to completion */
3000 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3001 gfar_clean_tx_ring(tx_queue);
3002 has_tx_work = 1;
3003 }
3004 }
3005
3006 if (!has_tx_work) {
3007 u32 imask;
3008 napi_complete(napi);
3009
3010 spin_lock_irq(&gfargrp->grplock);
3011 imask = gfar_read(&regs->imask);
3012 imask |= IMASK_TX_DEFAULT;
3013 gfar_write(&regs->imask, imask);
3014 spin_unlock_irq(&gfargrp->grplock);
3015 }
3016
3017 return 0;
3018}
3019
3020
3001#ifdef CONFIG_NET_POLL_CONTROLLER 3021#ifdef CONFIG_NET_POLL_CONTROLLER
3002/* Polling 'interrupt' - used by things like netconsole to send skbs 3022/* Polling 'interrupt' - used by things like netconsole to send skbs
3003 * without having to re-enable interrupts. It's not called while 3023 * without having to re-enable interrupts. It's not called while
@@ -3101,12 +3121,11 @@ static void adjust_link(struct net_device *dev)
3101{ 3121{
3102 struct gfar_private *priv = netdev_priv(dev); 3122 struct gfar_private *priv = netdev_priv(dev);
3103 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3123 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3104 unsigned long flags;
3105 struct phy_device *phydev = priv->phydev; 3124 struct phy_device *phydev = priv->phydev;
3106 int new_state = 0; 3125 int new_state = 0;
3107 3126
3108 local_irq_save(flags); 3127 if (test_bit(GFAR_RESETTING, &priv->state))
3109 lock_tx_qs(priv); 3128 return;
3110 3129
3111 if (phydev->link) { 3130 if (phydev->link) {
3112 u32 tempval1 = gfar_read(&regs->maccfg1); 3131 u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3178,8 +3197,6 @@ static void adjust_link(struct net_device *dev)
3178 3197
3179 if (new_state && netif_msg_link(priv)) 3198 if (new_state && netif_msg_link(priv))
3180 phy_print_status(phydev); 3199 phy_print_status(phydev);
3181 unlock_tx_qs(priv);
3182 local_irq_restore(flags);
3183} 3200}
3184 3201
3185/* Update the hash table based on the current list of multicast 3202/* Update the hash table based on the current list of multicast
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 52bb2b0195cc..84632c569f2c 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -377,8 +377,11 @@ extern const char gfar_driver_version[];
377 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ 377 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
378 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 378 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
379 | IMASK_PERR) 379 | IMASK_PERR)
380#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ 380#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
381 & IMASK_DEFAULT) 381#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
382
383#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
384#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
382 385
383/* Fifo management */ 386/* Fifo management */
384#define FIFO_TX_THR_MASK 0x01ff 387#define FIFO_TX_THR_MASK 0x01ff
@@ -409,7 +412,9 @@ extern const char gfar_driver_version[];
409 412
410/* This default RIR value directly corresponds 413/* This default RIR value directly corresponds
411 * to the 3-bit hash value generated */ 414 * to the 3-bit hash value generated */
412#define DEFAULT_RIR0 0x05397700 415#define DEFAULT_8RXQ_RIR0 0x05397700
416/* Map even hash values to Q0, and odd ones to Q1 */
417#define DEFAULT_2RXQ_RIR0 0x04104100
413 418
414/* RQFCR register bits */ 419/* RQFCR register bits */
415#define RQFCR_GPI 0x80000000 420#define RQFCR_GPI 0x80000000
@@ -880,7 +885,6 @@ struct gfar {
880#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 885#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
881#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 886#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
882#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 887#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
883#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
884#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 888#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
885#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 889#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
886#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 890#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
@@ -892,8 +896,8 @@ struct gfar {
892#define DEFAULT_MAPPING 0xFF 896#define DEFAULT_MAPPING 0xFF
893#endif 897#endif
894 898
895#define ISRG_SHIFT_TX 0x10 899#define ISRG_RR0 0x80000000
896#define ISRG_SHIFT_RX 0x18 900#define ISRG_TR0 0x00800000
897 901
898/* The same driver can operate in two modes */ 902/* The same driver can operate in two modes */
899/* SQ_SG_MODE: Single Queue Single Group Mode 903/* SQ_SG_MODE: Single Queue Single Group Mode
@@ -905,6 +909,22 @@ enum {
905 MQ_MG_MODE 909 MQ_MG_MODE
906}; 910};
907 911
912/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
913 * The driver supports a single pair of RX/Tx queues
914 * per interrupt group (Rx/Tx int line). MQ_MG mode
915 * devices have 2 interrupt groups, so the device will
916 * have a total of 2 Tx and 2 Rx queues in this case.
917 * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
918 * The driver supports all the 8 Rx and Tx HW queues
919 * each queue mapped by the Device Tree to one of
920 * the 2 interrupt groups. This mode implies significant
921 * processing overhead (CPU and controller level).
922 */
923enum gfar_poll_mode {
924 GFAR_SQ_POLLING = 0,
925 GFAR_MQ_POLLING
926};
927
908/* 928/*
909 * Per TX queue stats 929 * Per TX queue stats
910 */ 930 */
@@ -966,7 +986,6 @@ struct rx_q_stats {
966 986
967/** 987/**
968 * struct gfar_priv_rx_q - per rx queue structure 988 * struct gfar_priv_rx_q - per rx queue structure
969 * @rxlock: per queue rx spin lock
970 * @rx_skbuff: skb pointers 989 * @rx_skbuff: skb pointers
971 * @skb_currx: currently use skb pointer 990 * @skb_currx: currently use skb pointer
972 * @rx_bd_base: First rx buffer descriptor 991 * @rx_bd_base: First rx buffer descriptor
@@ -979,8 +998,7 @@ struct rx_q_stats {
979 */ 998 */
980 999
981struct gfar_priv_rx_q { 1000struct gfar_priv_rx_q {
982 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES))); 1001 struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
983 struct sk_buff ** rx_skbuff;
984 dma_addr_t rx_bd_dma_base; 1002 dma_addr_t rx_bd_dma_base;
985 struct rxbd8 *rx_bd_base; 1003 struct rxbd8 *rx_bd_base;
986 struct rxbd8 *cur_rx; 1004 struct rxbd8 *cur_rx;
@@ -1016,17 +1034,20 @@ struct gfar_irqinfo {
1016 */ 1034 */
1017 1035
1018struct gfar_priv_grp { 1036struct gfar_priv_grp {
1019 spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES))); 1037 spinlock_t grplock __aligned(SMP_CACHE_BYTES);
1020 struct napi_struct napi; 1038 struct napi_struct napi_rx;
1021 struct gfar_private *priv; 1039 struct napi_struct napi_tx;
1022 struct gfar __iomem *regs; 1040 struct gfar __iomem *regs;
1023 unsigned int rstat; 1041 struct gfar_priv_tx_q *tx_queue;
1024 unsigned long num_rx_queues; 1042 struct gfar_priv_rx_q *rx_queue;
1025 unsigned long rx_bit_map;
1026 /* cacheline 3 */
1027 unsigned int tstat; 1043 unsigned int tstat;
1044 unsigned int rstat;
1045
1046 struct gfar_private *priv;
1028 unsigned long num_tx_queues; 1047 unsigned long num_tx_queues;
1029 unsigned long tx_bit_map; 1048 unsigned long tx_bit_map;
1049 unsigned long num_rx_queues;
1050 unsigned long rx_bit_map;
1030 1051
1031 struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS]; 1052 struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
1032}; 1053};
@@ -1041,6 +1062,11 @@ enum gfar_errata {
1041 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ 1062 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
1042}; 1063};
1043 1064
1065enum gfar_dev_state {
1066 GFAR_DOWN = 1,
1067 GFAR_RESETTING
1068};
1069
1044/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1070/* Struct stolen almost completely (and shamelessly) from the FCC enet source
1045 * (Ok, that's not so true anymore, but there is a family resemblance) 1071 * (Ok, that's not so true anymore, but there is a family resemblance)
1046 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 1072 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1051,8 +1077,6 @@ enum gfar_errata {
1051 * the buffer descriptor determines the actual condition. 1077 * the buffer descriptor determines the actual condition.
1052 */ 1078 */
1053struct gfar_private { 1079struct gfar_private {
1054 unsigned int num_rx_queues;
1055
1056 struct device *dev; 1080 struct device *dev;
1057 struct net_device *ndev; 1081 struct net_device *ndev;
1058 enum gfar_errata errata; 1082 enum gfar_errata errata;
@@ -1060,6 +1084,7 @@ struct gfar_private {
1060 1084
1061 u16 uses_rxfcb; 1085 u16 uses_rxfcb;
1062 u16 padding; 1086 u16 padding;
1087 u32 device_flags;
1063 1088
1064 /* HW time stamping enabled flag */ 1089 /* HW time stamping enabled flag */
1065 int hwts_rx_en; 1090 int hwts_rx_en;
@@ -1069,10 +1094,12 @@ struct gfar_private {
1069 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; 1094 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
1070 struct gfar_priv_grp gfargrp[MAXGROUPS]; 1095 struct gfar_priv_grp gfargrp[MAXGROUPS];
1071 1096
1072 u32 device_flags; 1097 unsigned long state;
1073 1098
1074 unsigned int mode; 1099 unsigned short mode;
1100 unsigned short poll_mode;
1075 unsigned int num_tx_queues; 1101 unsigned int num_tx_queues;
1102 unsigned int num_rx_queues;
1076 unsigned int num_grps; 1103 unsigned int num_grps;
1077 1104
1078 /* Network Statistics */ 1105 /* Network Statistics */
@@ -1113,6 +1140,9 @@ struct gfar_private {
1113 unsigned int total_tx_ring_size; 1140 unsigned int total_tx_ring_size;
1114 unsigned int total_rx_ring_size; 1141 unsigned int total_rx_ring_size;
1115 1142
1143 u32 rqueue;
1144 u32 tqueue;
1145
1116 /* RX per device parameters */ 1146 /* RX per device parameters */
1117 unsigned int rx_stash_size; 1147 unsigned int rx_stash_size;
1118 unsigned int rx_stash_index; 1148 unsigned int rx_stash_index;
@@ -1127,11 +1157,6 @@ struct gfar_private {
1127 u32 __iomem *hash_regs[16]; 1157 u32 __iomem *hash_regs[16];
1128 int hash_width; 1158 int hash_width;
1129 1159
1130 /* global parameters */
1131 unsigned int fifo_threshold;
1132 unsigned int fifo_starve;
1133 unsigned int fifo_starve_off;
1134
1135 /*Filer table*/ 1160 /*Filer table*/
1136 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; 1161 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1137 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; 1162 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,21 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv,
1176 *fpr = gfar_read(&regs->rqfpr); 1201 *fpr = gfar_read(&regs->rqfpr);
1177} 1202}
1178 1203
1179void lock_rx_qs(struct gfar_private *priv); 1204static inline void gfar_write_isrg(struct gfar_private *priv)
1180void lock_tx_qs(struct gfar_private *priv); 1205{
1181void unlock_rx_qs(struct gfar_private *priv); 1206 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1182void unlock_tx_qs(struct gfar_private *priv); 1207 u32 __iomem *baddr = &regs->isrg0;
1208 u32 isrg = 0;
1209 int grp_idx, i;
1210
1211 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1212 struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
1213
1214 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
1215 isrg |= (ISRG_RR0 >> i);
1216 }
1217
1218 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
1219 isrg |= (ISRG_TR0 >> i);
1220 }
1221
1222 gfar_write(baddr, isrg);
1223
1224 baddr++;
1225 isrg = 0;
1226 }
1227}
1228
1183irqreturn_t gfar_receive(int irq, void *dev_id); 1229irqreturn_t gfar_receive(int irq, void *dev_id);
1184int startup_gfar(struct net_device *dev); 1230int startup_gfar(struct net_device *dev);
1185void stop_gfar(struct net_device *dev); 1231void stop_gfar(struct net_device *dev);
1186void gfar_halt(struct net_device *dev); 1232void reset_gfar(struct net_device *dev);
1233void gfar_mac_reset(struct gfar_private *priv);
1234void gfar_halt(struct gfar_private *priv);
1235void gfar_start(struct gfar_private *priv);
1187void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, 1236void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
1188 u32 regnum, u32 read); 1237 u32 regnum, u32 read);
1189void gfar_configure_coalescing_all(struct gfar_private *priv); 1238void gfar_configure_coalescing_all(struct gfar_private *priv);
1190void gfar_init_sysfs(struct net_device *dev);
1191int gfar_set_features(struct net_device *dev, netdev_features_t features); 1239int gfar_set_features(struct net_device *dev, netdev_features_t features);
1192void gfar_check_rx_parser_mode(struct gfar_private *priv);
1193void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
1194 1240
1195extern const struct ethtool_ops gfar_ethtool_ops; 1241extern const struct ethtool_ops gfar_ethtool_ops;
1196 1242
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 63d234419cc1..891dbee6e6c1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -44,10 +44,6 @@
44 44
45#include "gianfar.h" 45#include "gianfar.h"
46 46
47extern void gfar_start(struct net_device *dev);
48extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
49 int rx_work_limit);
50
51#define GFAR_MAX_COAL_USECS 0xffff 47#define GFAR_MAX_COAL_USECS 0xffff
52#define GFAR_MAX_COAL_FRAMES 0xff 48#define GFAR_MAX_COAL_FRAMES 0xff
53static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 49static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev,
364 struct ethtool_coalesce *cvals) 360 struct ethtool_coalesce *cvals)
365{ 361{
366 struct gfar_private *priv = netdev_priv(dev); 362 struct gfar_private *priv = netdev_priv(dev);
367 int i = 0; 363 int i, err = 0;
368 364
369 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 365 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
370 return -EOPNOTSUPP; 366 return -EOPNOTSUPP;
371 367
372 /* Set up rx coalescing */
373 /* As of now, we will enable/disable coalescing for all
374 * queues together in case of eTSEC2, this will be modified
375 * along with the ethtool interface
376 */
377 if ((cvals->rx_coalesce_usecs == 0) ||
378 (cvals->rx_max_coalesced_frames == 0)) {
379 for (i = 0; i < priv->num_rx_queues; i++)
380 priv->rx_queue[i]->rxcoalescing = 0;
381 } else {
382 for (i = 0; i < priv->num_rx_queues; i++)
383 priv->rx_queue[i]->rxcoalescing = 1;
384 }
385
386 if (NULL == priv->phydev) 368 if (NULL == priv->phydev)
387 return -ENODEV; 369 return -ENODEV;
388 370
@@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev,
399 return -EINVAL; 381 return -EINVAL;
400 } 382 }
401 383
384 /* Check the bounds of the values */
385 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
386 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
387 GFAR_MAX_COAL_USECS);
388 return -EINVAL;
389 }
390
391 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
392 netdev_info(dev, "Coalescing is limited to %d frames\n",
393 GFAR_MAX_COAL_FRAMES);
394 return -EINVAL;
395 }
396
397 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
398 cpu_relax();
399
400 /* Set up rx coalescing */
401 if ((cvals->rx_coalesce_usecs == 0) ||
402 (cvals->rx_max_coalesced_frames == 0)) {
403 for (i = 0; i < priv->num_rx_queues; i++)
404 priv->rx_queue[i]->rxcoalescing = 0;
405 } else {
406 for (i = 0; i < priv->num_rx_queues; i++)
407 priv->rx_queue[i]->rxcoalescing = 1;
408 }
409
402 for (i = 0; i < priv->num_rx_queues; i++) { 410 for (i = 0; i < priv->num_rx_queues; i++) {
403 priv->rx_queue[i]->rxic = mk_ic_value( 411 priv->rx_queue[i]->rxic = mk_ic_value(
404 cvals->rx_max_coalesced_frames, 412 cvals->rx_max_coalesced_frames,
@@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev,
415 priv->tx_queue[i]->txcoalescing = 1; 423 priv->tx_queue[i]->txcoalescing = 1;
416 } 424 }
417 425
418 /* Check the bounds of the values */
419 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
420 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
421 GFAR_MAX_COAL_USECS);
422 return -EINVAL;
423 }
424
425 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
426 netdev_info(dev, "Coalescing is limited to %d frames\n",
427 GFAR_MAX_COAL_FRAMES);
428 return -EINVAL;
429 }
430
431 for (i = 0; i < priv->num_tx_queues; i++) { 426 for (i = 0; i < priv->num_tx_queues; i++) {
432 priv->tx_queue[i]->txic = mk_ic_value( 427 priv->tx_queue[i]->txic = mk_ic_value(
433 cvals->tx_max_coalesced_frames, 428 cvals->tx_max_coalesced_frames,
434 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 429 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
435 } 430 }
436 431
437 gfar_configure_coalescing_all(priv); 432 if (dev->flags & IFF_UP) {
433 stop_gfar(dev);
434 err = startup_gfar(dev);
435 } else {
436 gfar_mac_reset(priv);
437 }
438
439 clear_bit_unlock(GFAR_RESETTING, &priv->state);
438 440
439 return 0; 441 return err;
440} 442}
441 443
442/* Fills in rvals with the current ring parameters. Currently, 444/* Fills in rvals with the current ring parameters. Currently,
@@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev,
467} 469}
468 470
469/* Change the current ring parameters, stopping the controller if 471/* Change the current ring parameters, stopping the controller if
470 * necessary so that we don't mess things up while we're in 472 * necessary so that we don't mess things up while we're in motion.
471 * motion. We wait for the ring to be clean before reallocating
472 * the rings.
473 */ 473 */
474static int gfar_sringparam(struct net_device *dev, 474static int gfar_sringparam(struct net_device *dev,
475 struct ethtool_ringparam *rvals) 475 struct ethtool_ringparam *rvals)
476{ 476{
477 struct gfar_private *priv = netdev_priv(dev); 477 struct gfar_private *priv = netdev_priv(dev);
478 int err = 0, i = 0; 478 int err = 0, i;
479 479
480 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) 480 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
481 return -EINVAL; 481 return -EINVAL;
@@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev,
493 return -EINVAL; 493 return -EINVAL;
494 } 494 }
495 495
496 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
497 cpu_relax();
496 498
497 if (dev->flags & IFF_UP) { 499 if (dev->flags & IFF_UP)
498 unsigned long flags;
499
500 /* Halt TX and RX, and process the frames which
501 * have already been received
502 */
503 local_irq_save(flags);
504 lock_tx_qs(priv);
505 lock_rx_qs(priv);
506
507 gfar_halt(dev);
508
509 unlock_rx_qs(priv);
510 unlock_tx_qs(priv);
511 local_irq_restore(flags);
512
513 for (i = 0; i < priv->num_rx_queues; i++)
514 gfar_clean_rx_ring(priv->rx_queue[i],
515 priv->rx_queue[i]->rx_ring_size);
516
517 /* Now we take down the rings to rebuild them */
518 stop_gfar(dev); 500 stop_gfar(dev);
519 }
520 501
521 /* Change the size */ 502 /* Change the sizes */
522 for (i = 0; i < priv->num_rx_queues; i++) { 503 for (i = 0; i < priv->num_rx_queues; i++)
523 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; 504 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
505
506 for (i = 0; i < priv->num_tx_queues; i++)
524 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; 507 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
525 priv->tx_queue[i]->num_txbdfree =
526 priv->tx_queue[i]->tx_ring_size;
527 }
528 508
529 /* Rebuild the rings with the new size */ 509 /* Rebuild the rings with the new size */
530 if (dev->flags & IFF_UP) { 510 if (dev->flags & IFF_UP)
531 err = startup_gfar(dev); 511 err = startup_gfar(dev);
532 netif_tx_wake_all_queues(dev); 512
533 } 513 clear_bit_unlock(GFAR_RESETTING, &priv->state);
514
534 return err; 515 return err;
535} 516}
536 517
@@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev,
608 589
609int gfar_set_features(struct net_device *dev, netdev_features_t features) 590int gfar_set_features(struct net_device *dev, netdev_features_t features)
610{ 591{
611 struct gfar_private *priv = netdev_priv(dev);
612 unsigned long flags;
613 int err = 0, i = 0;
614 netdev_features_t changed = dev->features ^ features; 592 netdev_features_t changed = dev->features ^ features;
593 struct gfar_private *priv = netdev_priv(dev);
594 int err = 0;
615 595
616 if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) 596 if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
617 gfar_vlan_mode(dev, features); 597 NETIF_F_RXCSUM)))
618
619 if (!(changed & NETIF_F_RXCSUM))
620 return 0; 598 return 0;
621 599
622 if (dev->flags & IFF_UP) { 600 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
623 /* Halt TX and RX, and process the frames which 601 cpu_relax();
624 * have already been received
625 */
626 local_irq_save(flags);
627 lock_tx_qs(priv);
628 lock_rx_qs(priv);
629
630 gfar_halt(dev);
631 602
632 unlock_tx_qs(priv); 603 dev->features = features;
633 unlock_rx_qs(priv);
634 local_irq_restore(flags);
635
636 for (i = 0; i < priv->num_rx_queues; i++)
637 gfar_clean_rx_ring(priv->rx_queue[i],
638 priv->rx_queue[i]->rx_ring_size);
639 604
605 if (dev->flags & IFF_UP) {
640 /* Now we take down the rings to rebuild them */ 606 /* Now we take down the rings to rebuild them */
641 stop_gfar(dev); 607 stop_gfar(dev);
642
643 dev->features = features;
644
645 err = startup_gfar(dev); 608 err = startup_gfar(dev);
646 netif_tx_wake_all_queues(dev); 609 } else {
610 gfar_mac_reset(priv);
647 } 611 }
612
613 clear_bit_unlock(GFAR_RESETTING, &priv->state);
614
648 return err; 615 return err;
649} 616}
650 617
@@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1610 if (tab->index > MAX_FILER_IDX - 1) 1577 if (tab->index > MAX_FILER_IDX - 1)
1611 return -EBUSY; 1578 return -EBUSY;
1612 1579
1613 /* Avoid inconsistent filer table to be processed */
1614 lock_rx_qs(priv);
1615
1616 /* Fill regular entries */ 1580 /* Fill regular entries */
1617 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1581 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
1618 i++) 1582 i++)
@@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1625 */ 1589 */
1626 gfar_write_filer(priv, i, 0x20, 0x0); 1590 gfar_write_filer(priv, i, 0x20, 0x0);
1627 1591
1628 unlock_rx_qs(priv);
1629
1630 return 0; 1592 return 0;
1631} 1593}
1632 1594
@@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1831 struct gfar_private *priv = netdev_priv(dev); 1793 struct gfar_private *priv = netdev_priv(dev);
1832 int ret = 0; 1794 int ret = 0;
1833 1795
1796 if (test_bit(GFAR_RESETTING, &priv->state))
1797 return -EBUSY;
1798
1834 mutex_lock(&priv->rx_queue_access); 1799 mutex_lock(&priv->rx_queue_access);
1835 1800
1836 switch (cmd->cmd) { 1801 switch (cmd->cmd) {
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index abc28da27042..bb568006f37d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -414,6 +414,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
414 .n_alarm = 0, 414 .n_alarm = 0,
415 .n_ext_ts = N_EXT_TS, 415 .n_ext_ts = N_EXT_TS,
416 .n_per_out = 0, 416 .n_per_out = 0,
417 .n_pins = 0,
417 .pps = 1, 418 .pps = 1,
418 .adjfreq = ptp_gianfar_adjfreq, 419 .adjfreq = ptp_gianfar_adjfreq,
419 .adjtime = ptp_gianfar_adjtime, 420 .adjtime = ptp_gianfar_adjtime,
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644
index e02dd1378751..000000000000
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ /dev/null
@@ -1,340 +0,0 @@
1/*
2 * drivers/net/ethernet/freescale/gianfar_sysfs.c
3 *
4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
7 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (galak@kernel.crashing.org)
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 *
13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 * Sysfs file creation and management
21 */
22
23#include <linux/kernel.h>
24#include <linux/string.h>
25#include <linux/errno.h>
26#include <linux/unistd.h>
27#include <linux/delay.h>
28#include <linux/etherdevice.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/device.h>
32
33#include <asm/uaccess.h>
34#include <linux/module.h>
35
36#include "gianfar.h"
37
38static ssize_t gfar_show_bd_stash(struct device *dev,
39 struct device_attribute *attr, char *buf)
40{
41 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
42
43 return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
44}
45
46static ssize_t gfar_set_bd_stash(struct device *dev,
47 struct device_attribute *attr,
48 const char *buf, size_t count)
49{
50 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
51 struct gfar __iomem *regs = priv->gfargrp[0].regs;
52 int new_setting = 0;
53 u32 temp;
54 unsigned long flags;
55
56 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
57 return count;
58
59
60 /* Find out the new setting */
61 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
62 new_setting = 1;
63 else if (!strncmp("off", buf, count - 1) ||
64 !strncmp("0", buf, count - 1))
65 new_setting = 0;
66 else
67 return count;
68
69
70 local_irq_save(flags);
71 lock_rx_qs(priv);
72
73 /* Set the new stashing value */
74 priv->bd_stash_en = new_setting;
75
76 temp = gfar_read(&regs->attr);
77
78 if (new_setting)
79 temp |= ATTR_BDSTASH;
80 else
81 temp &= ~(ATTR_BDSTASH);
82
83 gfar_write(&regs->attr, temp);
84
85 unlock_rx_qs(priv);
86 local_irq_restore(flags);
87
88 return count;
89}
90
91static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
92
93static ssize_t gfar_show_rx_stash_size(struct device *dev,
94 struct device_attribute *attr, char *buf)
95{
96 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
97
98 return sprintf(buf, "%d\n", priv->rx_stash_size);
99}
100
101static ssize_t gfar_set_rx_stash_size(struct device *dev,
102 struct device_attribute *attr,
103 const char *buf, size_t count)
104{
105 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
106 struct gfar __iomem *regs = priv->gfargrp[0].regs;
107 unsigned int length = simple_strtoul(buf, NULL, 0);
108 u32 temp;
109 unsigned long flags;
110
111 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
112 return count;
113
114 local_irq_save(flags);
115 lock_rx_qs(priv);
116
117 if (length > priv->rx_buffer_size)
118 goto out;
119
120 if (length == priv->rx_stash_size)
121 goto out;
122
123 priv->rx_stash_size = length;
124
125 temp = gfar_read(&regs->attreli);
126 temp &= ~ATTRELI_EL_MASK;
127 temp |= ATTRELI_EL(length);
128 gfar_write(&regs->attreli, temp);
129
130 /* Turn stashing on/off as appropriate */
131 temp = gfar_read(&regs->attr);
132
133 if (length)
134 temp |= ATTR_BUFSTASH;
135 else
136 temp &= ~(ATTR_BUFSTASH);
137
138 gfar_write(&regs->attr, temp);
139
140out:
141 unlock_rx_qs(priv);
142 local_irq_restore(flags);
143
144 return count;
145}
146
147static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
148 gfar_set_rx_stash_size);
149
150/* Stashing will only be enabled when rx_stash_size != 0 */
151static ssize_t gfar_show_rx_stash_index(struct device *dev,
152 struct device_attribute *attr,
153 char *buf)
154{
155 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
156
157 return sprintf(buf, "%d\n", priv->rx_stash_index);
158}
159
160static ssize_t gfar_set_rx_stash_index(struct device *dev,
161 struct device_attribute *attr,
162 const char *buf, size_t count)
163{
164 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
165 struct gfar __iomem *regs = priv->gfargrp[0].regs;
166 unsigned short index = simple_strtoul(buf, NULL, 0);
167 u32 temp;
168 unsigned long flags;
169
170 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
171 return count;
172
173 local_irq_save(flags);
174 lock_rx_qs(priv);
175
176 if (index > priv->rx_stash_size)
177 goto out;
178
179 if (index == priv->rx_stash_index)
180 goto out;
181
182 priv->rx_stash_index = index;
183
184 temp = gfar_read(&regs->attreli);
185 temp &= ~ATTRELI_EI_MASK;
186 temp |= ATTRELI_EI(index);
187 gfar_write(&regs->attreli, temp);
188
189out:
190 unlock_rx_qs(priv);
191 local_irq_restore(flags);
192
193 return count;
194}
195
196static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
197 gfar_set_rx_stash_index);
198
199static ssize_t gfar_show_fifo_threshold(struct device *dev,
200 struct device_attribute *attr,
201 char *buf)
202{
203 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
204
205 return sprintf(buf, "%d\n", priv->fifo_threshold);
206}
207
208static ssize_t gfar_set_fifo_threshold(struct device *dev,
209 struct device_attribute *attr,
210 const char *buf, size_t count)
211{
212 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
213 struct gfar __iomem *regs = priv->gfargrp[0].regs;
214 unsigned int length = simple_strtoul(buf, NULL, 0);
215 u32 temp;
216 unsigned long flags;
217
218 if (length > GFAR_MAX_FIFO_THRESHOLD)
219 return count;
220
221 local_irq_save(flags);
222 lock_tx_qs(priv);
223
224 priv->fifo_threshold = length;
225
226 temp = gfar_read(&regs->fifo_tx_thr);
227 temp &= ~FIFO_TX_THR_MASK;
228 temp |= length;
229 gfar_write(&regs->fifo_tx_thr, temp);
230
231 unlock_tx_qs(priv);
232 local_irq_restore(flags);
233
234 return count;
235}
236
237static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
238 gfar_set_fifo_threshold);
239
240static ssize_t gfar_show_fifo_starve(struct device *dev,
241 struct device_attribute *attr, char *buf)
242{
243 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
244
245 return sprintf(buf, "%d\n", priv->fifo_starve);
246}
247
248static ssize_t gfar_set_fifo_starve(struct device *dev,
249 struct device_attribute *attr,
250 const char *buf, size_t count)
251{
252 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
253 struct gfar __iomem *regs = priv->gfargrp[0].regs;
254 unsigned int num = simple_strtoul(buf, NULL, 0);
255 u32 temp;
256 unsigned long flags;
257
258 if (num > GFAR_MAX_FIFO_STARVE)
259 return count;
260
261 local_irq_save(flags);
262 lock_tx_qs(priv);
263
264 priv->fifo_starve = num;
265
266 temp = gfar_read(&regs->fifo_tx_starve);
267 temp &= ~FIFO_TX_STARVE_MASK;
268 temp |= num;
269 gfar_write(&regs->fifo_tx_starve, temp);
270
271 unlock_tx_qs(priv);
272 local_irq_restore(flags);
273
274 return count;
275}
276
277static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
278 gfar_set_fifo_starve);
279
280static ssize_t gfar_show_fifo_starve_off(struct device *dev,
281 struct device_attribute *attr,
282 char *buf)
283{
284 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
285
286 return sprintf(buf, "%d\n", priv->fifo_starve_off);
287}
288
289static ssize_t gfar_set_fifo_starve_off(struct device *dev,
290 struct device_attribute *attr,
291 const char *buf, size_t count)
292{
293 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
294 struct gfar __iomem *regs = priv->gfargrp[0].regs;
295 unsigned int num = simple_strtoul(buf, NULL, 0);
296 u32 temp;
297 unsigned long flags;
298
299 if (num > GFAR_MAX_FIFO_STARVE_OFF)
300 return count;
301
302 local_irq_save(flags);
303 lock_tx_qs(priv);
304
305 priv->fifo_starve_off = num;
306
307 temp = gfar_read(&regs->fifo_tx_starve_shutoff);
308 temp &= ~FIFO_TX_STARVE_OFF_MASK;
309 temp |= num;
310 gfar_write(&regs->fifo_tx_starve_shutoff, temp);
311
312 unlock_tx_qs(priv);
313 local_irq_restore(flags);
314
315 return count;
316}
317
318static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
319 gfar_set_fifo_starve_off);
320
321void gfar_init_sysfs(struct net_device *dev)
322{
323 struct gfar_private *priv = netdev_priv(dev);
324 int rc;
325
326 /* Initialize the default values */
327 priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
328 priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
329 priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
330
331 /* Create our sysfs files */
332 rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
333 rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
334 rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
335 rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
336 rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
337 rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
338 if (rc)
339 dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
340}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 72291a8904a9..c8299c31b21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3261 3261
3262 dev->stats.tx_packets++; 3262 dev->stats.tx_packets++;
3263 3263
3264 dev_kfree_skb(skb); 3264 dev_consume_skb_any(skb);
3265 3265
3266 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3266 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3267 ugeth->skb_dirtytx[txQ] = 3267 ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 17fca323c143..c984998b34a0 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
993 dev->name)); 993 dev->name));
994 dev->stats.tx_dropped++; 994 dev->stats.tx_dropped++;
995 995
996 dev_kfree_skb(skb); 996 dev_kfree_skb_any(skb);
997 } else { 997 } else {
998 if (++lp->next_tx_cmd == TX_RING_SIZE) 998 if (++lp->next_tx_cmd == TX_RING_SIZE)
999 lp->next_tx_cmd = 0; 999 lp->next_tx_cmd = 0;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 7628e0fd8455..538903bf13bc 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
490 skb_arr[index] = skb; 490 skb_arr[index] = skb;
491 tmp_addr = ehea_map_vaddr(skb->data); 491 tmp_addr = ehea_map_vaddr(skb->data);
492 if (tmp_addr == -1) { 492 if (tmp_addr == -1) {
493 dev_kfree_skb(skb); 493 dev_consume_skb_any(skb);
494 q_skba->os_skbs = fill_wqes - i; 494 q_skba->os_skbs = fill_wqes - i;
495 ret = 0; 495 ret = 0;
496 break; 496 break;
@@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
856 856
857 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); 857 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
858 skb = pr->sq_skba.arr[index]; 858 skb = pr->sq_skba.arr[index];
859 dev_kfree_skb(skb); 859 dev_consume_skb_any(skb);
860 pr->sq_skba.arr[index] = NULL; 860 pr->sq_skba.arr[index] = NULL;
861 } 861 }
862 862
@@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2044 skb_copy_bits(skb, 0, imm_data, skb->len); 2044 skb_copy_bits(skb, 0, imm_data, skb->len);
2045 2045
2046 swqe->immediate_data_length = skb->len; 2046 swqe->immediate_data_length = skb->len;
2047 dev_kfree_skb(skb); 2047 dev_consume_skb_any(skb);
2048} 2048}
2049 2049
2050static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) 2050static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 1fc8334fc181..c9127562bd22 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1044,7 +1044,7 @@ retry_bounce:
1044 DMA_TO_DEVICE); 1044 DMA_TO_DEVICE);
1045 1045
1046out: 1046out:
1047 dev_kfree_skb(skb); 1047 dev_consume_skb_any(skb);
1048 return NETDEV_TX_OK; 1048 return NETDEV_TX_OK;
1049 1049
1050map_failed_frags: 1050map_failed_frags:
@@ -1072,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1072 unsigned long lpar_rc; 1072 unsigned long lpar_rc;
1073 1073
1074restart_poll: 1074restart_poll:
1075 do { 1075 while (frames_processed < budget) {
1076 if (!ibmveth_rxq_pending_buffer(adapter)) 1076 if (!ibmveth_rxq_pending_buffer(adapter))
1077 break; 1077 break;
1078 1078
@@ -1121,7 +1121,7 @@ restart_poll:
1121 netdev->stats.rx_bytes += length; 1121 netdev->stats.rx_bytes += length;
1122 frames_processed++; 1122 frames_processed++;
1123 } 1123 }
1124 } while (frames_processed < budget); 1124 }
1125 1125
1126 ibmveth_replenish_task(adapter); 1126 ibmveth_replenish_task(adapter);
1127 1127
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index bf7a01ef9a57..b56461ce674c 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1778 * testing, ie sending frames with bad CRC. 1778 * testing, ie sending frames with bad CRC.
1779 */ 1779 */
1780 if (unlikely(skb->no_fcs)) 1780 if (unlikely(skb->no_fcs))
1781 cb->command |= __constant_cpu_to_le16(cb_tx_nc); 1781 cb->command |= cpu_to_le16(cb_tx_nc);
1782 else 1782 else
1783 cb->command &= ~__constant_cpu_to_le16(cb_tx_nc); 1783 cb->command &= ~cpu_to_le16(cb_tx_nc);
1784 1784
1785 /* interrupt every 16 packets regardless of delay */ 1785 /* interrupt every 16 packets regardless of delay */
1786 if ((nic->cbs_avail & ~15) == nic->cbs_avail) 1786 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index ff2d806eaef7..a5f6b11d6992 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* 80003ES2LAN Gigabit Ethernet Controller (Copper) 22/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
30 * 80003ES2LAN Gigabit Ethernet Controller (Serdes) 23 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 90d363b2d280..535a9430976d 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_80003ES2LAN_H_ 22#ifndef _E1000E_80003ES2LAN_H_
30#define _E1000E_80003ES2LAN_H_ 23#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 8fed74e3fa53..e0aa7f1efb08 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* 82571EB Gigabit Ethernet Controller 22/* 82571EB Gigabit Ethernet Controller
30 * 82571EB Gigabit Ethernet Controller (Copper) 23 * 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 08e24dc3dc0e..2e758f796d60 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_82571_H_ 22#ifndef _E1000E_82571_H_
30#define _E1000E_82571_H_ 23#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index c2dcfcc10857..106de493373c 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel PRO/1000 Linux driver 3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2013 Intel Corporation. 4# Copyright(c) 1999 - 2014 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details. 13# more details.
14# 14#
15# You should have received a copy of the GNU General Public License along with 15# You should have received a copy of the GNU General Public License
16# this program; if not, write to the Free Software Foundation, Inc., 16# along with this program; if not, see <http://www.gnu.org/licenses/>.
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18# 17#
19# The full GNU General Public License is included in this distribution in 18# The full GNU General Public License is included in this distribution in
20# the file called "COPYING". 19# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351c94a0cf74..d18e89212575 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000_DEFINES_H_ 22#ifndef _E1000_DEFINES_H_
30#define _E1000_DEFINES_H_ 23#define _E1000_DEFINES_H_
@@ -35,9 +28,11 @@
35 28
36/* Definitions for power management and wakeup registers */ 29/* Definitions for power management and wakeup registers */
37/* Wake Up Control */ 30/* Wake Up Control */
38#define E1000_WUC_APME 0x00000001 /* APM Enable */ 31#define E1000_WUC_APME 0x00000001 /* APM Enable */
39#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ 32#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
40#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ 33#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
34#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
35#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
41 36
42/* Wake Up Filter Control */ 37/* Wake Up Filter Control */
43#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 38#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0150f7fc893d..1471c5464a89 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* Linux PRO/1000 Ethernet Driver main header file */ 22/* Linux PRO/1000 Ethernet Driver main header file */
30 23
@@ -269,6 +262,7 @@ struct e1000_adapter {
269 u32 tx_head_addr; 262 u32 tx_head_addr;
270 u32 tx_fifo_size; 263 u32 tx_fifo_size;
271 u32 tx_dma_failed; 264 u32 tx_dma_failed;
265 u32 tx_hwtstamp_timeouts;
272 266
273 /* Rx */ 267 /* Rx */
274 bool (*clean_rx) (struct e1000_ring *ring, int *work_done, 268 bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
@@ -333,7 +327,6 @@ struct e1000_adapter {
333 struct work_struct update_phy_task; 327 struct work_struct update_phy_task;
334 struct work_struct print_hang_task; 328 struct work_struct print_hang_task;
335 329
336 bool idle_check;
337 int phy_hang_count; 330 int phy_hang_count;
338 331
339 u16 tx_ring_count; 332 u16 tx_ring_count;
@@ -342,6 +335,7 @@ struct e1000_adapter {
342 struct hwtstamp_config hwtstamp_config; 335 struct hwtstamp_config hwtstamp_config;
343 struct delayed_work systim_overflow_work; 336 struct delayed_work systim_overflow_work;
344 struct sk_buff *tx_hwtstamp_skb; 337 struct sk_buff *tx_hwtstamp_skb;
338 unsigned long tx_hwtstamp_start;
345 struct work_struct tx_hwtstamp_work; 339 struct work_struct tx_hwtstamp_work;
346 spinlock_t systim_lock; /* protects SYSTIML/H regsters */ 340 spinlock_t systim_lock; /* protects SYSTIML/H regsters */
347 struct cyclecounter cc; 341 struct cyclecounter cc;
@@ -476,7 +470,7 @@ void e1000e_check_options(struct e1000_adapter *adapter);
476void e1000e_set_ethtool_ops(struct net_device *netdev); 470void e1000e_set_ethtool_ops(struct net_device *netdev);
477 471
478int e1000e_up(struct e1000_adapter *adapter); 472int e1000e_up(struct e1000_adapter *adapter);
479void e1000e_down(struct e1000_adapter *adapter); 473void e1000e_down(struct e1000_adapter *adapter, bool reset);
480void e1000e_reinit_locked(struct e1000_adapter *adapter); 474void e1000e_reinit_locked(struct e1000_adapter *adapter);
481void e1000e_reset(struct e1000_adapter *adapter); 475void e1000e_reset(struct e1000_adapter *adapter);
482void e1000e_power_up_phy(struct e1000_adapter *adapter); 476void e1000e_power_up_phy(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d14c8f53384c..cad250bc1b99 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* ethtool support for e1000 */ 22/* ethtool support for e1000 */
30 23
@@ -111,6 +104,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
111 E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), 104 E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
112 E1000_STAT("uncorr_ecc_errors", uncorr_errors), 105 E1000_STAT("uncorr_ecc_errors", uncorr_errors),
113 E1000_STAT("corr_ecc_errors", corr_errors), 106 E1000_STAT("corr_ecc_errors", corr_errors),
107 E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
114}; 108};
115 109
116#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) 110#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -332,7 +326,7 @@ static int e1000_set_settings(struct net_device *netdev,
332 326
333 /* reset the link */ 327 /* reset the link */
334 if (netif_running(adapter->netdev)) { 328 if (netif_running(adapter->netdev)) {
335 e1000e_down(adapter); 329 e1000e_down(adapter, true);
336 e1000e_up(adapter); 330 e1000e_up(adapter);
337 } else { 331 } else {
338 e1000e_reset(adapter); 332 e1000e_reset(adapter);
@@ -380,7 +374,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
380 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 374 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
381 hw->fc.requested_mode = e1000_fc_default; 375 hw->fc.requested_mode = e1000_fc_default;
382 if (netif_running(adapter->netdev)) { 376 if (netif_running(adapter->netdev)) {
383 e1000e_down(adapter); 377 e1000e_down(adapter, true);
384 e1000e_up(adapter); 378 e1000e_up(adapter);
385 } else { 379 } else {
386 e1000e_reset(adapter); 380 e1000e_reset(adapter);
@@ -726,7 +720,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
726 720
727 pm_runtime_get_sync(netdev->dev.parent); 721 pm_runtime_get_sync(netdev->dev.parent);
728 722
729 e1000e_down(adapter); 723 e1000e_down(adapter, true);
730 724
731 /* We can't just free everything and then setup again, because the 725 /* We can't just free everything and then setup again, because the
732 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring 726 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
@@ -924,15 +918,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
924 } 918 }
925 if (mac->type == e1000_pch2lan) { 919 if (mac->type == e1000_pch2lan) {
926 /* SHRAH[0,1,2] different than previous */ 920 /* SHRAH[0,1,2] different than previous */
927 if (i == 7) 921 if (i == 1)
928 mask &= 0xFFF4FFFF; 922 mask &= 0xFFF4FFFF;
929 /* SHRAH[3] different than SHRAH[0,1,2] */ 923 /* SHRAH[3] different than SHRAH[0,1,2] */
930 if (i == 10) 924 if (i == 4)
931 mask |= (1 << 30); 925 mask |= (1 << 30);
926 /* RAR[1-6] owned by management engine - skipping */
927 if (i > 0)
928 i += 6;
932 } 929 }
933 930
934 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, 931 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
935 0xFFFFFFFF); 932 0xFFFFFFFF);
933 /* reset index to actual value */
934 if ((mac->type == e1000_pch2lan) && (i > 6))
935 i -= 6;
936 } 936 }
937 937
938 for (i = 0; i < mac->mta_reg_count; i++) 938 for (i = 0; i < mac->mta_reg_count; i++)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b7f38435d1fd..6b3de5f39a97 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000_HW_H_ 22#ifndef _E1000_HW_H_
30#define _E1000_HW_H_ 23#define _E1000_HW_H_
@@ -655,12 +648,20 @@ struct e1000_shadow_ram {
655 648
656#define E1000_ICH8_SHADOW_RAM_WORDS 2048 649#define E1000_ICH8_SHADOW_RAM_WORDS 2048
657 650
651/* I218 PHY Ultra Low Power (ULP) states */
652enum e1000_ulp_state {
653 e1000_ulp_state_unknown,
654 e1000_ulp_state_off,
655 e1000_ulp_state_on,
656};
657
658struct e1000_dev_spec_ich8lan { 658struct e1000_dev_spec_ich8lan {
659 bool kmrn_lock_loss_workaround_enabled; 659 bool kmrn_lock_loss_workaround_enabled;
660 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; 660 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
661 bool nvm_k1_enabled; 661 bool nvm_k1_enabled;
662 bool eee_disable; 662 bool eee_disable;
663 u16 eee_lp_ability; 663 u16 eee_lp_ability;
664 enum e1000_ulp_state ulp_state;
664}; 665};
665 666
666struct e1000_hw { 667struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 42f0f6717511..9866f264f55e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* 82562G 10/100 Network Connection 22/* 82562G 10/100 Network Connection
30 * 82562G-2 10/100 Network Connection 23 * 82562G-2 10/100 Network Connection
@@ -53,6 +46,14 @@
53 * 82578DC Gigabit Network Connection 46 * 82578DC Gigabit Network Connection
54 * 82579LM Gigabit Network Connection 47 * 82579LM Gigabit Network Connection
55 * 82579V Gigabit Network Connection 48 * 82579V Gigabit Network Connection
49 * Ethernet Connection I217-LM
50 * Ethernet Connection I217-V
51 * Ethernet Connection I218-V
52 * Ethernet Connection I218-LM
53 * Ethernet Connection (2) I218-LM
54 * Ethernet Connection (2) I218-V
55 * Ethernet Connection (3) I218-LM
56 * Ethernet Connection (3) I218-V
56 */ 57 */
57 58
58#include "e1000.h" 59#include "e1000.h"
@@ -142,7 +143,9 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
142static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); 143static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
143static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 144static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 145static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
146static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
145static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); 147static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
148static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
146 149
147static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 150static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
148{ 151{
@@ -239,6 +242,47 @@ out:
239} 242}
240 243
241/** 244/**
245 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
246 * @hw: pointer to the HW structure
247 *
248 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
249 * used to reset the PHY to a quiescent state when necessary.
250 **/
251static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
252{
253 u32 mac_reg;
254
255 /* Set Phy Config Counter to 50msec */
256 mac_reg = er32(FEXTNVM3);
257 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
258 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
259 ew32(FEXTNVM3, mac_reg);
260
261 /* Toggle LANPHYPC Value bit */
262 mac_reg = er32(CTRL);
263 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
264 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
265 ew32(CTRL, mac_reg);
266 e1e_flush();
267 usleep_range(10, 20);
268 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
269 ew32(CTRL, mac_reg);
270 e1e_flush();
271
272 if (hw->mac.type < e1000_pch_lpt) {
273 msleep(50);
274 } else {
275 u16 count = 20;
276
277 do {
278 usleep_range(5000, 10000);
279 } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
280
281 msleep(30);
282 }
283}
284
285/**
242 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds 286 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
243 * @hw: pointer to the HW structure 287 * @hw: pointer to the HW structure
244 * 288 *
@@ -247,6 +291,7 @@ out:
247 **/ 291 **/
248static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) 292static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
249{ 293{
294 struct e1000_adapter *adapter = hw->adapter;
250 u32 mac_reg, fwsm = er32(FWSM); 295 u32 mac_reg, fwsm = er32(FWSM);
251 s32 ret_val; 296 s32 ret_val;
252 297
@@ -255,6 +300,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
255 */ 300 */
256 e1000_gate_hw_phy_config_ich8lan(hw, true); 301 e1000_gate_hw_phy_config_ich8lan(hw, true);
257 302
303 /* It is not possible to be certain of the current state of ULP
304 * so forcibly disable it.
305 */
306 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
307 e1000_disable_ulp_lpt_lp(hw, true);
308
258 ret_val = hw->phy.ops.acquire(hw); 309 ret_val = hw->phy.ops.acquire(hw);
259 if (ret_val) { 310 if (ret_val) {
260 e_dbg("Failed to initialize PHY flow\n"); 311 e_dbg("Failed to initialize PHY flow\n");
@@ -300,33 +351,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
300 break; 351 break;
301 } 352 }
302 353
303 e_dbg("Toggling LANPHYPC\n");
304
305 /* Set Phy Config Counter to 50msec */
306 mac_reg = er32(FEXTNVM3);
307 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
308 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
309 ew32(FEXTNVM3, mac_reg);
310
311 /* Toggle LANPHYPC Value bit */ 354 /* Toggle LANPHYPC Value bit */
312 mac_reg = er32(CTRL); 355 e1000_toggle_lanphypc_pch_lpt(hw);
313 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; 356 if (hw->mac.type >= e1000_pch_lpt) {
314 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
315 ew32(CTRL, mac_reg);
316 e1e_flush();
317 usleep_range(10, 20);
318 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
319 ew32(CTRL, mac_reg);
320 e1e_flush();
321 if (hw->mac.type < e1000_pch_lpt) {
322 msleep(50);
323 } else {
324 u16 count = 20;
325 do {
326 usleep_range(5000, 10000);
327 } while (!(er32(CTRL_EXT) &
328 E1000_CTRL_EXT_LPCD) && count--);
329 usleep_range(30000, 60000);
330 if (e1000_phy_is_accessible_pchlan(hw)) 357 if (e1000_phy_is_accessible_pchlan(hw))
331 break; 358 break;
332 359
@@ -349,12 +376,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
349 376
350 hw->phy.ops.release(hw); 377 hw->phy.ops.release(hw);
351 if (!ret_val) { 378 if (!ret_val) {
379
380 /* Check to see if able to reset PHY. Print error if not */
381 if (hw->phy.ops.check_reset_block(hw)) {
382 e_err("Reset blocked by ME\n");
383 goto out;
384 }
385
352 /* Reset the PHY before any access to it. Doing so, ensures 386 /* Reset the PHY before any access to it. Doing so, ensures
353 * that the PHY is in a known good state before we read/write 387 * that the PHY is in a known good state before we read/write
354 * PHY registers. The generic reset is sufficient here, 388 * PHY registers. The generic reset is sufficient here,
355 * because we haven't determined the PHY type yet. 389 * because we haven't determined the PHY type yet.
356 */ 390 */
357 ret_val = e1000e_phy_hw_reset_generic(hw); 391 ret_val = e1000e_phy_hw_reset_generic(hw);
392 if (ret_val)
393 goto out;
394
395 /* On a successful reset, possibly need to wait for the PHY
396 * to quiesce to an accessible state before returning control
397 * to the calling function. If the PHY does not quiesce, then
398 * return E1000E_BLK_PHY_RESET, as this is the condition that
399 * the PHY is in.
400 */
401 ret_val = hw->phy.ops.check_reset_block(hw);
402 if (ret_val)
403 e_err("ME blocked access to PHY after reset\n");
358 } 404 }
359 405
360out: 406out:
@@ -724,8 +770,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
724 * Enable/disable EEE based on setting in dev_spec structure, the duplex of 770 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
725 * the link and the EEE capabilities of the link partner. The LPI Control 771 * the link and the EEE capabilities of the link partner. The LPI Control
726 * register bits will remain set only if/when link is up. 772 * register bits will remain set only if/when link is up.
773 *
774 * EEE LPI must not be asserted earlier than one second after link is up.
775 * On 82579, EEE LPI should not be enabled until such time otherwise there
776 * can be link issues with some switches. Other devices can have EEE LPI
777 * enabled immediately upon link up since they have a timer in hardware which
778 * prevents LPI from being asserted too early.
727 **/ 779 **/
728static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) 780s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
729{ 781{
730 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 782 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
731 s32 ret_val; 783 s32 ret_val;
@@ -979,6 +1031,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
979} 1031}
980 1032
981/** 1033/**
1034 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1035 * @hw: pointer to the HW structure
1036 * @to_sx: boolean indicating a system power state transition to Sx
1037 *
1038 * When link is down, configure ULP mode to significantly reduce the power
1039 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1040 * ME firmware to start the ULP configuration. If not on an ME enabled
1041 * system, configure the ULP mode by software.
1042 */
1043s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1044{
1045 u32 mac_reg;
1046 s32 ret_val = 0;
1047 u16 phy_reg;
1048
1049 if ((hw->mac.type < e1000_pch_lpt) ||
1050 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1051 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1052 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1053 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1054 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1055 return 0;
1056
1057 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1058 /* Request ME configure ULP mode in the PHY */
1059 mac_reg = er32(H2ME);
1060 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1061 ew32(H2ME, mac_reg);
1062
1063 goto out;
1064 }
1065
1066 if (!to_sx) {
1067 int i = 0;
1068
1069 /* Poll up to 5 seconds for Cable Disconnected indication */
1070 while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1071 /* Bail if link is re-acquired */
1072 if (er32(STATUS) & E1000_STATUS_LU)
1073 return -E1000_ERR_PHY;
1074
1075 if (i++ == 100)
1076 break;
1077
1078 msleep(50);
1079 }
1080 e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1081 (er32(FEXT) &
1082 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1083 }
1084
1085 ret_val = hw->phy.ops.acquire(hw);
1086 if (ret_val)
1087 goto out;
1088
1089 /* Force SMBus mode in PHY */
1090 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1091 if (ret_val)
1092 goto release;
1093 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1094 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1095
1096 /* Force SMBus mode in MAC */
1097 mac_reg = er32(CTRL_EXT);
1098 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1099 ew32(CTRL_EXT, mac_reg);
1100
1101 /* Set Inband ULP Exit, Reset to SMBus mode and
1102 * Disable SMBus Release on PERST# in PHY
1103 */
1104 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1105 if (ret_val)
1106 goto release;
1107 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1108 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1109 if (to_sx) {
1110 if (er32(WUFC) & E1000_WUFC_LNKC)
1111 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1112
1113 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1114 } else {
1115 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1116 }
1117 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1118
1119 /* Set Disable SMBus Release on PERST# in MAC */
1120 mac_reg = er32(FEXTNVM7);
1121 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1122 ew32(FEXTNVM7, mac_reg);
1123
1124 /* Commit ULP changes in PHY by starting auto ULP configuration */
1125 phy_reg |= I218_ULP_CONFIG1_START;
1126 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1127release:
1128 hw->phy.ops.release(hw);
1129out:
1130 if (ret_val)
1131 e_dbg("Error in ULP enable flow: %d\n", ret_val);
1132 else
1133 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1134
1135 return ret_val;
1136}
1137
1138/**
1139 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1140 * @hw: pointer to the HW structure
1141 * @force: boolean indicating whether or not to force disabling ULP
1142 *
1143 * Un-configure ULP mode when link is up, the system is transitioned from
1144 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
1145 * system, poll for an indication from ME that ULP has been un-configured.
1146 * If not on an ME enabled system, un-configure the ULP mode by software.
1147 *
1148 * During nominal operation, this function is called when link is acquired
1149 * to disable ULP mode (force=false); otherwise, for example when unloading
1150 * the driver or during Sx->S0 transitions, this is called with force=true
1151 * to forcibly disable ULP.
1152 */
1153static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1154{
1155 s32 ret_val = 0;
1156 u32 mac_reg;
1157 u16 phy_reg;
1158 int i = 0;
1159
1160 if ((hw->mac.type < e1000_pch_lpt) ||
1161 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1162 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1163 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1164 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1165 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1166 return 0;
1167
1168 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1169 if (force) {
1170 /* Request ME un-configure ULP mode in the PHY */
1171 mac_reg = er32(H2ME);
1172 mac_reg &= ~E1000_H2ME_ULP;
1173 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1174 ew32(H2ME, mac_reg);
1175 }
1176
1177 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1178 while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1179 if (i++ == 10) {
1180 ret_val = -E1000_ERR_PHY;
1181 goto out;
1182 }
1183
1184 usleep_range(10000, 20000);
1185 }
1186 e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1187
1188 if (force) {
1189 mac_reg = er32(H2ME);
1190 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1191 ew32(H2ME, mac_reg);
1192 } else {
1193 /* Clear H2ME.ULP after ME ULP configuration */
1194 mac_reg = er32(H2ME);
1195 mac_reg &= ~E1000_H2ME_ULP;
1196 ew32(H2ME, mac_reg);
1197 }
1198
1199 goto out;
1200 }
1201
1202 ret_val = hw->phy.ops.acquire(hw);
1203 if (ret_val)
1204 goto out;
1205
1206 if (force)
1207 /* Toggle LANPHYPC Value bit */
1208 e1000_toggle_lanphypc_pch_lpt(hw);
1209
1210 /* Unforce SMBus mode in PHY */
1211 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1212 if (ret_val) {
1213 /* The MAC might be in PCIe mode, so temporarily force to
1214 * SMBus mode in order to access the PHY.
1215 */
1216 mac_reg = er32(CTRL_EXT);
1217 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1218 ew32(CTRL_EXT, mac_reg);
1219
1220 msleep(50);
1221
1222 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1223 &phy_reg);
1224 if (ret_val)
1225 goto release;
1226 }
1227 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1228 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1229
1230 /* Unforce SMBus mode in MAC */
1231 mac_reg = er32(CTRL_EXT);
1232 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1233 ew32(CTRL_EXT, mac_reg);
1234
1235 /* When ULP mode was previously entered, K1 was disabled by the
1236 * hardware. Re-Enable K1 in the PHY when exiting ULP.
1237 */
1238 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1239 if (ret_val)
1240 goto release;
1241 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1242 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1243
1244 /* Clear ULP enabled configuration */
1245 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1246 if (ret_val)
1247 goto release;
1248 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1249 I218_ULP_CONFIG1_STICKY_ULP |
1250 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1251 I218_ULP_CONFIG1_WOL_HOST |
1252 I218_ULP_CONFIG1_INBAND_EXIT |
1253 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1254 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1255
1256 /* Commit ULP changes by starting auto ULP configuration */
1257 phy_reg |= I218_ULP_CONFIG1_START;
1258 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1259
1260 /* Clear Disable SMBus Release on PERST# in MAC */
1261 mac_reg = er32(FEXTNVM7);
1262 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1263 ew32(FEXTNVM7, mac_reg);
1264
1265release:
1266 hw->phy.ops.release(hw);
1267 if (force) {
1268 e1000_phy_hw_reset(hw);
1269 msleep(50);
1270 }
1271out:
1272 if (ret_val)
1273 e_dbg("Error in ULP disable flow: %d\n", ret_val);
1274 else
1275 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1276
1277 return ret_val;
1278}
1279
1280/**
982 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 1281 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
983 * @hw: pointer to the HW structure 1282 * @hw: pointer to the HW structure
984 * 1283 *
@@ -1106,9 +1405,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1106 e1000e_check_downshift(hw); 1405 e1000e_check_downshift(hw);
1107 1406
1108 /* Enable/Disable EEE after link up */ 1407 /* Enable/Disable EEE after link up */
1109 ret_val = e1000_set_eee_pchlan(hw); 1408 if (hw->phy.type > e1000_phy_82579) {
1110 if (ret_val) 1409 ret_val = e1000_set_eee_pchlan(hw);
1111 return ret_val; 1410 if (ret_val)
1411 return ret_val;
1412 }
1112 1413
1113 /* If we are forcing speed/duplex, then we simply return since 1414 /* If we are forcing speed/duplex, then we simply return since
1114 * we have already determined whether we have link or not. 1415 * we have already determined whether we have link or not.
@@ -1374,7 +1675,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1374 /* RAR[1-6] are owned by manageability. Skip those and program the 1675 /* RAR[1-6] are owned by manageability. Skip those and program the
1375 * next address into the SHRA register array. 1676 * next address into the SHRA register array.
1376 */ 1677 */
1377 if (index < (u32)(hw->mac.rar_entry_count - 6)) { 1678 if (index < (u32)(hw->mac.rar_entry_count)) {
1378 s32 ret_val; 1679 s32 ret_val;
1379 1680
1380 ret_val = e1000_acquire_swflag_ich8lan(hw); 1681 ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1484,11 +1785,13 @@ out:
1484 **/ 1785 **/
1485static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) 1786static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1486{ 1787{
1487 u32 fwsm; 1788 bool blocked = false;
1789 int i = 0;
1488 1790
1489 fwsm = er32(FWSM); 1791 while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
1490 1792 (i++ < 10))
1491 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; 1793 usleep_range(10000, 20000);
1794 return blocked ? E1000_BLK_PHY_RESET : 0;
1492} 1795}
1493 1796
1494/** 1797/**
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 217090df33e7..bead50f9187b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_ICH8LAN_H_ 22#ifndef _E1000E_ICH8LAN_H_
30#define _E1000E_ICH8LAN_H_ 23#define _E1000E_ICH8LAN_H_
@@ -65,11 +58,16 @@
65 58
66#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 59#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
67#define E1000_FWSM_WLOCK_MAC_SHIFT 7 60#define E1000_FWSM_WLOCK_MAC_SHIFT 7
61#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
68 62
69/* Shared Receive Address Registers */ 63/* Shared Receive Address Registers */
70#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) 64#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
71#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) 65#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
72 66
67#define E1000_H2ME 0x05B50 /* Host to ME */
68#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
69#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
70
73#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ 71#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
74 (ID_LED_OFF1_OFF2 << 8) | \ 72 (ID_LED_OFF1_OFF2 << 8) | \
75 (ID_LED_OFF1_ON2 << 4) | \ 73 (ID_LED_OFF1_ON2 << 4) | \
@@ -82,6 +80,9 @@
82 80
83#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 81#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
84 82
83/* FEXT register bit definition */
84#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
85
85#define E1000_FEXTNVM_SW_CONFIG 1 86#define E1000_FEXTNVM_SW_CONFIG 1
86#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ 87#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
87 88
@@ -95,10 +96,12 @@
95#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 96#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
96#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 97#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
97 98
99#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
100
98#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 101#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
99 102
100#define E1000_ICH_RAR_ENTRIES 7 103#define E1000_ICH_RAR_ENTRIES 7
101#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */ 104#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
102#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ 105#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
103 106
104#define PHY_PAGE_SHIFT 5 107#define PHY_PAGE_SHIFT 5
@@ -161,6 +164,16 @@
161#define CV_SMB_CTRL PHY_REG(769, 23) 164#define CV_SMB_CTRL PHY_REG(769, 23)
162#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 165#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
163 166
167/* I218 Ultra Low Power Configuration 1 Register */
168#define I218_ULP_CONFIG1 PHY_REG(779, 16)
169#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
170#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
171#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
172#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
173#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
174#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
175#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
176
164/* SMBus Address Phy Register */ 177/* SMBus Address Phy Register */
165#define HV_SMB_ADDR PHY_REG(768, 26) 178#define HV_SMB_ADDR PHY_REG(768, 26)
166#define HV_SMB_ADDR_MASK 0x007F 179#define HV_SMB_ADDR_MASK 0x007F
@@ -195,6 +208,7 @@
195/* PHY Power Management Control */ 208/* PHY Power Management Control */
196#define HV_PM_CTRL PHY_REG(770, 17) 209#define HV_PM_CTRL PHY_REG(770, 17)
197#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 210#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
211#define HV_PM_CTRL_K1_ENABLE 0x4000
198 212
199#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ 213#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
200 214
@@ -268,4 +282,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
268s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); 282s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
269s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); 283s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
270s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); 284s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
285s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
286s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
271#endif /* _E1000E_ICH8LAN_H_ */ 287#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 2480c1091873..baa0a466d1d0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index a61fee404ebe..4e81c2825b7a 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_MAC_H_ 22#ifndef _E1000E_MAC_H_
30#define _E1000E_MAC_H_ 23#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e4b0f1ef92f6..cb37ff1f1321 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 326897c29ea8..a8c27f98f7b0 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_MANAGE_H_ 22#ifndef _E1000E_MANAGE_H_
30#define _E1000E_MANAGE_H_ 23#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933c4cdd..dce377b59b2c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 23
@@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
885 struct sk_buff *skb) 878 struct sk_buff *skb)
886{ 879{
887 if (netdev->features & NETIF_F_RXHASH) 880 if (netdev->features & NETIF_F_RXHASH)
888 skb->rxhash = le32_to_cpu(rss); 881 skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
889} 882}
890 883
891/** 884/**
@@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
1097 adapter->tx_hang_recheck = true; 1090 adapter->tx_hang_recheck = true;
1098 return; 1091 return;
1099 } 1092 }
1100 /* Real hang detected */
1101 adapter->tx_hang_recheck = false; 1093 adapter->tx_hang_recheck = false;
1094
1095 if (er32(TDH(0)) == er32(TDT(0))) {
1096 e_dbg("false hang detected, ignoring\n");
1097 return;
1098 }
1099
1100 /* Real hang detected */
1102 netif_stop_queue(netdev); 1101 netif_stop_queue(netdev);
1103 1102
1104 e1e_rphy(hw, MII_BMSR, &phy_status); 1103 e1e_rphy(hw, MII_BMSR, &phy_status);
@@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
1128 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), 1127 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1129 phy_status, phy_1000t_status, phy_ext_status, pci_status); 1128 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1130 1129
1130 e1000e_dump(adapter);
1131
1131 /* Suggest workaround for known h/w issue */ 1132 /* Suggest workaround for known h/w issue */
1132 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) 1133 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1133 e_err("Try turning off Tx pause (flow control) via ethtool\n"); 1134 e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -1147,9 +1148,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1147 tx_hwtstamp_work); 1148 tx_hwtstamp_work);
1148 struct e1000_hw *hw = &adapter->hw; 1149 struct e1000_hw *hw = &adapter->hw;
1149 1150
1150 if (!adapter->tx_hwtstamp_skb)
1151 return;
1152
1153 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { 1151 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1154 struct skb_shared_hwtstamps shhwtstamps; 1152 struct skb_shared_hwtstamps shhwtstamps;
1155 u64 txstmp; 1153 u64 txstmp;
@@ -1162,6 +1160,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1162 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); 1160 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
1163 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1161 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1164 adapter->tx_hwtstamp_skb = NULL; 1162 adapter->tx_hwtstamp_skb = NULL;
1163 } else if (time_after(jiffies, adapter->tx_hwtstamp_start
1164 + adapter->tx_timeout_factor * HZ)) {
1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1166 adapter->tx_hwtstamp_skb = NULL;
1167 adapter->tx_hwtstamp_timeouts++;
1168 e_warn("clearing Tx timestamp hang");
1165 } else { 1169 } else {
1166 /* reschedule to check later */ 1170 /* reschedule to check later */
1167 schedule_work(&adapter->tx_hwtstamp_work); 1171 schedule_work(&adapter->tx_hwtstamp_work);
@@ -1701,7 +1705,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1701 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1705 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1702 1706
1703 writel(0, rx_ring->head); 1707 writel(0, rx_ring->head);
1704 if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 1708 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1705 e1000e_update_rdt_wa(rx_ring, 0); 1709 e1000e_update_rdt_wa(rx_ring, 0);
1706 else 1710 else
1707 writel(0, rx_ring->tail); 1711 writel(0, rx_ring->tail);
@@ -2038,13 +2042,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2038 msix_entry), 2042 msix_entry),
2039 GFP_KERNEL); 2043 GFP_KERNEL);
2040 if (adapter->msix_entries) { 2044 if (adapter->msix_entries) {
2045 struct e1000_adapter *a = adapter;
2046
2041 for (i = 0; i < adapter->num_vectors; i++) 2047 for (i = 0; i < adapter->num_vectors; i++)
2042 adapter->msix_entries[i].entry = i; 2048 adapter->msix_entries[i].entry = i;
2043 2049
2044 err = pci_enable_msix(adapter->pdev, 2050 err = pci_enable_msix_range(a->pdev,
2045 adapter->msix_entries, 2051 a->msix_entries,
2046 adapter->num_vectors); 2052 a->num_vectors,
2047 if (err == 0) 2053 a->num_vectors);
2054 if (err > 0)
2048 return; 2055 return;
2049 } 2056 }
2050 /* MSI-X failed, so fall through and try MSI */ 2057 /* MSI-X failed, so fall through and try MSI */
@@ -2402,7 +2409,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2402 tx_ring->next_to_clean = 0; 2409 tx_ring->next_to_clean = 0;
2403 2410
2404 writel(0, tx_ring->head); 2411 writel(0, tx_ring->head);
2405 if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 2412 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2406 e1000e_update_tdt_wa(tx_ring, 0); 2413 e1000e_update_tdt_wa(tx_ring, 0);
2407 else 2414 else
2408 writel(0, tx_ring->tail); 2415 writel(0, tx_ring->tail);
@@ -2894,7 +2901,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2894 struct e1000_hw *hw = &adapter->hw; 2901 struct e1000_hw *hw = &adapter->hw;
2895 struct e1000_ring *tx_ring = adapter->tx_ring; 2902 struct e1000_ring *tx_ring = adapter->tx_ring;
2896 u64 tdba; 2903 u64 tdba;
2897 u32 tdlen, tarc; 2904 u32 tdlen, tctl, tarc;
2898 2905
2899 /* Setup the HW Tx Head and Tail descriptor pointers */ 2906 /* Setup the HW Tx Head and Tail descriptor pointers */
2900 tdba = tx_ring->dma; 2907 tdba = tx_ring->dma;
@@ -2931,6 +2938,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2931 /* erratum work around: set txdctl the same for both queues */ 2938 /* erratum work around: set txdctl the same for both queues */
2932 ew32(TXDCTL(1), er32(TXDCTL(0))); 2939 ew32(TXDCTL(1), er32(TXDCTL(0)));
2933 2940
2941 /* Program the Transmit Control Register */
2942 tctl = er32(TCTL);
2943 tctl &= ~E1000_TCTL_CT;
2944 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2945 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2946
2934 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2947 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2935 tarc = er32(TARC(0)); 2948 tarc = er32(TARC(0));
2936 /* set the speed mode bit, we'll clear it if we're not at 2949 /* set the speed mode bit, we'll clear it if we're not at
@@ -2961,6 +2974,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2961 /* enable Report Status bit */ 2974 /* enable Report Status bit */
2962 adapter->txd_cmd |= E1000_TXD_CMD_RS; 2975 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2963 2976
2977 ew32(TCTL, tctl);
2978
2964 hw->mac.ops.config_collision_dist(hw); 2979 hw->mac.ops.config_collision_dist(hw);
2965} 2980}
2966 2981
@@ -2976,11 +2991,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2976 u32 rctl, rfctl; 2991 u32 rctl, rfctl;
2977 u32 pages = 0; 2992 u32 pages = 0;
2978 2993
2979 /* Workaround Si errata on PCHx - configure jumbo frame flow */ 2994 /* Workaround Si errata on PCHx - configure jumbo frame flow.
2980 if ((hw->mac.type >= e1000_pch2lan) && 2995 * If jumbo frames not set, program related MAC/PHY registers
2981 (adapter->netdev->mtu > ETH_DATA_LEN) && 2996 * to h/w defaults
2982 e1000_lv_jumbo_workaround_ich8lan(hw, true)) 2997 */
2983 e_dbg("failed to enable jumbo frame workaround mode\n"); 2998 if (hw->mac.type >= e1000_pch2lan) {
2999 s32 ret_val;
3000
3001 if (adapter->netdev->mtu > ETH_DATA_LEN)
3002 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
3003 else
3004 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
3005
3006 if (ret_val)
3007 e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3008 }
2984 3009
2985 /* Program MC offset vector base */ 3010 /* Program MC offset vector base */
2986 rctl = er32(RCTL); 3011 rctl = er32(RCTL);
@@ -3331,6 +3356,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
3331 struct e1000_hw *hw = &adapter->hw; 3356 struct e1000_hw *hw = &adapter->hw;
3332 u32 rctl; 3357 u32 rctl;
3333 3358
3359 if (pm_runtime_suspended(netdev->dev.parent))
3360 return;
3361
3334 /* Check for Promiscuous and All Multicast modes */ 3362 /* Check for Promiscuous and All Multicast modes */
3335 rctl = er32(RCTL); 3363 rctl = er32(RCTL);
3336 3364
@@ -3691,10 +3719,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
3691 */ 3719 */
3692static void e1000_power_down_phy(struct e1000_adapter *adapter) 3720static void e1000_power_down_phy(struct e1000_adapter *adapter)
3693{ 3721{
3694 /* WoL is enabled */
3695 if (adapter->wol)
3696 return;
3697
3698 if (adapter->hw.phy.ops.power_down) 3722 if (adapter->hw.phy.ops.power_down)
3699 adapter->hw.phy.ops.power_down(&adapter->hw); 3723 adapter->hw.phy.ops.power_down(&adapter->hw);
3700} 3724}
@@ -3911,10 +3935,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
3911 } 3935 }
3912 3936
3913 if (!netif_running(adapter->netdev) && 3937 if (!netif_running(adapter->netdev) &&
3914 !test_bit(__E1000_TESTING, &adapter->state)) { 3938 !test_bit(__E1000_TESTING, &adapter->state))
3915 e1000_power_down_phy(adapter); 3939 e1000_power_down_phy(adapter);
3916 return;
3917 }
3918 3940
3919 e1000_get_phy_info(hw); 3941 e1000_get_phy_info(hw);
3920 3942
@@ -3981,7 +4003,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3981 4003
3982static void e1000e_update_stats(struct e1000_adapter *adapter); 4004static void e1000e_update_stats(struct e1000_adapter *adapter);
3983 4005
3984void e1000e_down(struct e1000_adapter *adapter) 4006/**
4007 * e1000e_down - quiesce the device and optionally reset the hardware
4008 * @adapter: board private structure
4009 * @reset: boolean flag to reset the hardware or not
4010 */
4011void e1000e_down(struct e1000_adapter *adapter, bool reset)
3985{ 4012{
3986 struct net_device *netdev = adapter->netdev; 4013 struct net_device *netdev = adapter->netdev;
3987 struct e1000_hw *hw = &adapter->hw; 4014 struct e1000_hw *hw = &adapter->hw;
@@ -4035,12 +4062,8 @@ void e1000e_down(struct e1000_adapter *adapter)
4035 e1000_lv_jumbo_workaround_ich8lan(hw, false)) 4062 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4036 e_dbg("failed to disable jumbo frame workaround mode\n"); 4063 e_dbg("failed to disable jumbo frame workaround mode\n");
4037 4064
4038 if (!pci_channel_offline(adapter->pdev)) 4065 if (reset && !pci_channel_offline(adapter->pdev))
4039 e1000e_reset(adapter); 4066 e1000e_reset(adapter);
4040
4041 /* TODO: for power management, we could drop the link and
4042 * pci_disable_device here.
4043 */
4044} 4067}
4045 4068
4046void e1000e_reinit_locked(struct e1000_adapter *adapter) 4069void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4048,7 +4071,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
4048 might_sleep(); 4071 might_sleep();
4049 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4072 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4050 usleep_range(1000, 2000); 4073 usleep_range(1000, 2000);
4051 e1000e_down(adapter); 4074 e1000e_down(adapter, true);
4052 e1000e_up(adapter); 4075 e1000e_up(adapter);
4053 clear_bit(__E1000_RESETTING, &adapter->state); 4076 clear_bit(__E1000_RESETTING, &adapter->state);
4054} 4077}
@@ -4326,7 +4349,6 @@ static int e1000_open(struct net_device *netdev)
4326 adapter->tx_hang_recheck = false; 4349 adapter->tx_hang_recheck = false;
4327 netif_start_queue(netdev); 4350 netif_start_queue(netdev);
4328 4351
4329 adapter->idle_check = true;
4330 hw->mac.get_link_status = true; 4352 hw->mac.get_link_status = true;
4331 pm_runtime_put(&pdev->dev); 4353 pm_runtime_put(&pdev->dev);
4332 4354
@@ -4376,14 +4398,15 @@ static int e1000_close(struct net_device *netdev)
4376 pm_runtime_get_sync(&pdev->dev); 4398 pm_runtime_get_sync(&pdev->dev);
4377 4399
4378 if (!test_bit(__E1000_DOWN, &adapter->state)) { 4400 if (!test_bit(__E1000_DOWN, &adapter->state)) {
4379 e1000e_down(adapter); 4401 e1000e_down(adapter, true);
4380 e1000_free_irq(adapter); 4402 e1000_free_irq(adapter);
4403
4404 /* Link status message must follow this format */
4405 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
4381 } 4406 }
4382 4407
4383 napi_disable(&adapter->napi); 4408 napi_disable(&adapter->napi);
4384 4409
4385 e1000_power_down_phy(adapter);
4386
4387 e1000e_free_tx_resources(adapter->tx_ring); 4410 e1000e_free_tx_resources(adapter->tx_ring);
4388 e1000e_free_rx_resources(adapter->rx_ring); 4411 e1000e_free_rx_resources(adapter->rx_ring);
4389 4412
@@ -4460,11 +4483,16 @@ static void e1000e_update_phy_task(struct work_struct *work)
4460 struct e1000_adapter *adapter = container_of(work, 4483 struct e1000_adapter *adapter = container_of(work,
4461 struct e1000_adapter, 4484 struct e1000_adapter,
4462 update_phy_task); 4485 update_phy_task);
4486 struct e1000_hw *hw = &adapter->hw;
4463 4487
4464 if (test_bit(__E1000_DOWN, &adapter->state)) 4488 if (test_bit(__E1000_DOWN, &adapter->state))
4465 return; 4489 return;
4466 4490
4467 e1000_get_phy_info(&adapter->hw); 4491 e1000_get_phy_info(hw);
4492
4493 /* Enable EEE on 82579 after link up */
4494 if (hw->phy.type == e1000_phy_82579)
4495 e1000_set_eee_pchlan(hw);
4468} 4496}
4469 4497
4470/** 4498/**
@@ -4799,6 +4827,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4799 4827
4800 if (adapter->phy_hang_count > 1) { 4828 if (adapter->phy_hang_count > 1) {
4801 adapter->phy_hang_count = 0; 4829 adapter->phy_hang_count = 0;
4830 e_dbg("PHY appears hung - resetting\n");
4802 schedule_work(&adapter->reset_task); 4831 schedule_work(&adapter->reset_task);
4803 } 4832 }
4804} 4833}
@@ -4957,15 +4986,11 @@ static void e1000_watchdog_task(struct work_struct *work)
4957 mod_timer(&adapter->phy_info_timer, 4986 mod_timer(&adapter->phy_info_timer,
4958 round_jiffies(jiffies + 2 * HZ)); 4987 round_jiffies(jiffies + 2 * HZ));
4959 4988
4960 /* The link is lost so the controller stops DMA. 4989 /* 8000ES2LAN requires a Rx packet buffer work-around
4961 * If there is queued Tx work that cannot be done 4990 * on link down event; reset the controller to flush
4962 * or if on an 8000ES2LAN which requires a Rx packet 4991 * the Rx packet buffer.
4963 * buffer work-around on link down event, reset the
4964 * controller to flush the Tx/Rx packet buffers.
4965 * (Do the reset outside of interrupt context).
4966 */ 4992 */
4967 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || 4993 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4968 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
4969 adapter->flags |= FLAG_RESTART_NOW; 4994 adapter->flags |= FLAG_RESTART_NOW;
4970 else 4995 else
4971 pm_schedule_suspend(netdev->dev.parent, 4996 pm_schedule_suspend(netdev->dev.parent,
@@ -4988,6 +5013,15 @@ link_up:
4988 adapter->gotc_old = adapter->stats.gotc; 5013 adapter->gotc_old = adapter->stats.gotc;
4989 spin_unlock(&adapter->stats64_lock); 5014 spin_unlock(&adapter->stats64_lock);
4990 5015
5016 /* If the link is lost the controller stops DMA, but
5017 * if there is queued Tx work it cannot be done. So
5018 * reset the controller to flush the Tx packet buffers.
5019 */
5020 if (!netif_carrier_ok(netdev) &&
5021 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5022 adapter->flags |= FLAG_RESTART_NOW;
5023
5024 /* If reset is necessary, do it outside of interrupt context. */
4991 if (adapter->flags & FLAG_RESTART_NOW) { 5025 if (adapter->flags & FLAG_RESTART_NOW) {
4992 schedule_work(&adapter->reset_task); 5026 schedule_work(&adapter->reset_task);
4993 /* return immediately since reset is imminent */ 5027 /* return immediately since reset is imminent */
@@ -5546,6 +5580,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5546 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5580 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5547 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5581 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5548 adapter->tx_hwtstamp_skb = skb_get(skb); 5582 adapter->tx_hwtstamp_skb = skb_get(skb);
5583 adapter->tx_hwtstamp_start = jiffies;
5549 schedule_work(&adapter->tx_hwtstamp_work); 5584 schedule_work(&adapter->tx_hwtstamp_work);
5550 } else { 5585 } else {
5551 skb_tx_timestamp(skb); 5586 skb_tx_timestamp(skb);
@@ -5684,8 +5719,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5684 adapter->max_frame_size = max_frame; 5719 adapter->max_frame_size = max_frame;
5685 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5720 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5686 netdev->mtu = new_mtu; 5721 netdev->mtu = new_mtu;
5722
5723 pm_runtime_get_sync(netdev->dev.parent);
5724
5687 if (netif_running(netdev)) 5725 if (netif_running(netdev))
5688 e1000e_down(adapter); 5726 e1000e_down(adapter, true);
5689 5727
5690 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 5728 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5691 * means we reserve 2 more, this pushes us to allocate from the next 5729 * means we reserve 2 more, this pushes us to allocate from the next
@@ -5711,6 +5749,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5711 else 5749 else
5712 e1000e_reset(adapter); 5750 e1000e_reset(adapter);
5713 5751
5752 pm_runtime_put_sync(netdev->dev.parent);
5753
5714 clear_bit(__E1000_RESETTING, &adapter->state); 5754 clear_bit(__E1000_RESETTING, &adapter->state);
5715 5755
5716 return 0; 5756 return 0;
@@ -5852,7 +5892,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5852static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 5892static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5853{ 5893{
5854 struct e1000_hw *hw = &adapter->hw; 5894 struct e1000_hw *hw = &adapter->hw;
5855 u32 i, mac_reg; 5895 u32 i, mac_reg, wuc;
5856 u16 phy_reg, wuc_enable; 5896 u16 phy_reg, wuc_enable;
5857 int retval; 5897 int retval;
5858 5898
@@ -5899,13 +5939,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5899 phy_reg |= BM_RCTL_RFCE; 5939 phy_reg |= BM_RCTL_RFCE;
5900 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 5940 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5901 5941
5942 wuc = E1000_WUC_PME_EN;
5943 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
5944 wuc |= E1000_WUC_APME;
5945
5902 /* enable PHY wakeup in MAC register */ 5946 /* enable PHY wakeup in MAC register */
5903 ew32(WUFC, wufc); 5947 ew32(WUFC, wufc);
5904 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); 5948 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
5949 E1000_WUC_PME_STATUS | wuc));
5905 5950
5906 /* configure and enable PHY wakeup in PHY registers */ 5951 /* configure and enable PHY wakeup in PHY registers */
5907 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 5952 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5908 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 5953 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
5909 5954
5910 /* activate PHY wakeup */ 5955 /* activate PHY wakeup */
5911 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 5956 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
@@ -5918,15 +5963,10 @@ release:
5918 return retval; 5963 return retval;
5919} 5964}
5920 5965
5921static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) 5966static int e1000e_pm_freeze(struct device *dev)
5922{ 5967{
5923 struct net_device *netdev = pci_get_drvdata(pdev); 5968 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
5924 struct e1000_adapter *adapter = netdev_priv(netdev); 5969 struct e1000_adapter *adapter = netdev_priv(netdev);
5925 struct e1000_hw *hw = &adapter->hw;
5926 u32 ctrl, ctrl_ext, rctl, status;
5927 /* Runtime suspend should only enable wakeup for link changes */
5928 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5929 int retval = 0;
5930 5970
5931 netif_device_detach(netdev); 5971 netif_device_detach(netdev);
5932 5972
@@ -5937,11 +5977,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5937 usleep_range(10000, 20000); 5977 usleep_range(10000, 20000);
5938 5978
5939 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5979 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5940 e1000e_down(adapter); 5980
5981 /* Quiesce the device without resetting the hardware */
5982 e1000e_down(adapter, false);
5941 e1000_free_irq(adapter); 5983 e1000_free_irq(adapter);
5942 } 5984 }
5943 e1000e_reset_interrupt_capability(adapter); 5985 e1000e_reset_interrupt_capability(adapter);
5944 5986
5987 /* Allow time for pending master requests to run */
5988 e1000e_disable_pcie_master(&adapter->hw);
5989
5990 return 0;
5991}
5992
5993static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5994{
5995 struct net_device *netdev = pci_get_drvdata(pdev);
5996 struct e1000_adapter *adapter = netdev_priv(netdev);
5997 struct e1000_hw *hw = &adapter->hw;
5998 u32 ctrl, ctrl_ext, rctl, status;
5999 /* Runtime suspend should only enable wakeup for link changes */
6000 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
6001 int retval = 0;
6002
5945 status = er32(STATUS); 6003 status = er32(STATUS);
5946 if (status & E1000_STATUS_LU) 6004 if (status & E1000_STATUS_LU)
5947 wufc &= ~E1000_WUFC_LNKC; 6005 wufc &= ~E1000_WUFC_LNKC;
@@ -5972,12 +6030,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5972 ew32(CTRL_EXT, ctrl_ext); 6030 ew32(CTRL_EXT, ctrl_ext);
5973 } 6031 }
5974 6032
6033 if (!runtime)
6034 e1000e_power_up_phy(adapter);
6035
5975 if (adapter->flags & FLAG_IS_ICH) 6036 if (adapter->flags & FLAG_IS_ICH)
5976 e1000_suspend_workarounds_ich8lan(&adapter->hw); 6037 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5977 6038
5978 /* Allow time for pending master requests to run */
5979 e1000e_disable_pcie_master(&adapter->hw);
5980
5981 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 6039 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5982 /* enable wakeup by the PHY */ 6040 /* enable wakeup by the PHY */
5983 retval = e1000_init_phy_wakeup(adapter, wufc); 6041 retval = e1000_init_phy_wakeup(adapter, wufc);
@@ -5991,10 +6049,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5991 } else { 6049 } else {
5992 ew32(WUC, 0); 6050 ew32(WUC, 0);
5993 ew32(WUFC, 0); 6051 ew32(WUFC, 0);
6052
6053 e1000_power_down_phy(adapter);
5994 } 6054 }
5995 6055
5996 if (adapter->hw.phy.type == e1000_phy_igp_3) 6056 if (adapter->hw.phy.type == e1000_phy_igp_3) {
5997 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 6057 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
6058 } else if (hw->mac.type == e1000_pch_lpt) {
6059 if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
6060 /* ULP does not support wake from unicast, multicast
6061 * or broadcast.
6062 */
6063 retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
6064
6065 if (retval)
6066 return retval;
6067 }
6068
5998 6069
5999 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6070 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6000 * would have already happened in close and is redundant. 6071 * would have already happened in close and is redundant.
@@ -6102,18 +6173,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6102} 6173}
6103 6174
6104#ifdef CONFIG_PM 6175#ifdef CONFIG_PM
6105static bool e1000e_pm_ready(struct e1000_adapter *adapter)
6106{
6107 return !!adapter->tx_ring->buffer_info;
6108}
6109
6110static int __e1000_resume(struct pci_dev *pdev) 6176static int __e1000_resume(struct pci_dev *pdev)
6111{ 6177{
6112 struct net_device *netdev = pci_get_drvdata(pdev); 6178 struct net_device *netdev = pci_get_drvdata(pdev);
6113 struct e1000_adapter *adapter = netdev_priv(netdev); 6179 struct e1000_adapter *adapter = netdev_priv(netdev);
6114 struct e1000_hw *hw = &adapter->hw; 6180 struct e1000_hw *hw = &adapter->hw;
6115 u16 aspm_disable_flag = 0; 6181 u16 aspm_disable_flag = 0;
6116 u32 err;
6117 6182
6118 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 6183 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6119 aspm_disable_flag = PCIE_LINK_STATE_L0S; 6184 aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6124,13 +6189,6 @@ static int __e1000_resume(struct pci_dev *pdev)
6124 6189
6125 pci_set_master(pdev); 6190 pci_set_master(pdev);
6126 6191
6127 e1000e_set_interrupt_capability(adapter);
6128 if (netif_running(netdev)) {
6129 err = e1000_request_irq(adapter);
6130 if (err)
6131 return err;
6132 }
6133
6134 if (hw->mac.type >= e1000_pch2lan) 6192 if (hw->mac.type >= e1000_pch2lan)
6135 e1000_resume_workarounds_pchlan(&adapter->hw); 6193 e1000_resume_workarounds_pchlan(&adapter->hw);
6136 6194
@@ -6169,11 +6227,6 @@ static int __e1000_resume(struct pci_dev *pdev)
6169 6227
6170 e1000_init_manageability_pt(adapter); 6228 e1000_init_manageability_pt(adapter);
6171 6229
6172 if (netif_running(netdev))
6173 e1000e_up(adapter);
6174
6175 netif_device_attach(netdev);
6176
6177 /* If the controller has AMT, do not set DRV_LOAD until the interface 6230 /* If the controller has AMT, do not set DRV_LOAD until the interface
6178 * is up. For all other cases, let the f/w know that the h/w is now 6231 * is up. For all other cases, let the f/w know that the h/w is now
6179 * under the control of the driver. 6232 * under the control of the driver.
@@ -6184,75 +6237,111 @@ static int __e1000_resume(struct pci_dev *pdev)
6184 return 0; 6237 return 0;
6185} 6238}
6186 6239
6240static int e1000e_pm_thaw(struct device *dev)
6241{
6242 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6243 struct e1000_adapter *adapter = netdev_priv(netdev);
6244
6245 e1000e_set_interrupt_capability(adapter);
6246 if (netif_running(netdev)) {
6247 u32 err = e1000_request_irq(adapter);
6248
6249 if (err)
6250 return err;
6251
6252 e1000e_up(adapter);
6253 }
6254
6255 netif_device_attach(netdev);
6256
6257 return 0;
6258}
6259
6187#ifdef CONFIG_PM_SLEEP 6260#ifdef CONFIG_PM_SLEEP
6188static int e1000_suspend(struct device *dev) 6261static int e1000e_pm_suspend(struct device *dev)
6189{ 6262{
6190 struct pci_dev *pdev = to_pci_dev(dev); 6263 struct pci_dev *pdev = to_pci_dev(dev);
6191 6264
6265 e1000e_pm_freeze(dev);
6266
6192 return __e1000_shutdown(pdev, false); 6267 return __e1000_shutdown(pdev, false);
6193} 6268}
6194 6269
6195static int e1000_resume(struct device *dev) 6270static int e1000e_pm_resume(struct device *dev)
6196{ 6271{
6197 struct pci_dev *pdev = to_pci_dev(dev); 6272 struct pci_dev *pdev = to_pci_dev(dev);
6198 struct net_device *netdev = pci_get_drvdata(pdev); 6273 int rc;
6199 struct e1000_adapter *adapter = netdev_priv(netdev);
6200 6274
6201 if (e1000e_pm_ready(adapter)) 6275 rc = __e1000_resume(pdev);
6202 adapter->idle_check = true; 6276 if (rc)
6277 return rc;
6203 6278
6204 return __e1000_resume(pdev); 6279 return e1000e_pm_thaw(dev);
6205} 6280}
6206#endif /* CONFIG_PM_SLEEP */ 6281#endif /* CONFIG_PM_SLEEP */
6207 6282
6208#ifdef CONFIG_PM_RUNTIME 6283#ifdef CONFIG_PM_RUNTIME
6209static int e1000_runtime_suspend(struct device *dev) 6284static int e1000e_pm_runtime_idle(struct device *dev)
6210{ 6285{
6211 struct pci_dev *pdev = to_pci_dev(dev); 6286 struct pci_dev *pdev = to_pci_dev(dev);
6212 struct net_device *netdev = pci_get_drvdata(pdev); 6287 struct net_device *netdev = pci_get_drvdata(pdev);
6213 struct e1000_adapter *adapter = netdev_priv(netdev); 6288 struct e1000_adapter *adapter = netdev_priv(netdev);
6214 6289
6215 if (!e1000e_pm_ready(adapter)) 6290 if (!e1000e_has_link(adapter))
6216 return 0; 6291 pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
6217 6292
6218 return __e1000_shutdown(pdev, true); 6293 return -EBUSY;
6219} 6294}
6220 6295
6221static int e1000_idle(struct device *dev) 6296static int e1000e_pm_runtime_resume(struct device *dev)
6222{ 6297{
6223 struct pci_dev *pdev = to_pci_dev(dev); 6298 struct pci_dev *pdev = to_pci_dev(dev);
6224 struct net_device *netdev = pci_get_drvdata(pdev); 6299 struct net_device *netdev = pci_get_drvdata(pdev);
6225 struct e1000_adapter *adapter = netdev_priv(netdev); 6300 struct e1000_adapter *adapter = netdev_priv(netdev);
6301 int rc;
6226 6302
6227 if (!e1000e_pm_ready(adapter)) 6303 rc = __e1000_resume(pdev);
6228 return 0; 6304 if (rc)
6305 return rc;
6229 6306
6230 if (adapter->idle_check) { 6307 if (netdev->flags & IFF_UP)
6231 adapter->idle_check = false; 6308 rc = e1000e_up(adapter);
6232 if (!e1000e_has_link(adapter))
6233 pm_schedule_suspend(dev, MSEC_PER_SEC);
6234 }
6235 6309
6236 return -EBUSY; 6310 return rc;
6237} 6311}
6238 6312
6239static int e1000_runtime_resume(struct device *dev) 6313static int e1000e_pm_runtime_suspend(struct device *dev)
6240{ 6314{
6241 struct pci_dev *pdev = to_pci_dev(dev); 6315 struct pci_dev *pdev = to_pci_dev(dev);
6242 struct net_device *netdev = pci_get_drvdata(pdev); 6316 struct net_device *netdev = pci_get_drvdata(pdev);
6243 struct e1000_adapter *adapter = netdev_priv(netdev); 6317 struct e1000_adapter *adapter = netdev_priv(netdev);
6244 6318
6245 if (!e1000e_pm_ready(adapter)) 6319 if (netdev->flags & IFF_UP) {
6246 return 0; 6320 int count = E1000_CHECK_RESET_COUNT;
6321
6322 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6323 usleep_range(10000, 20000);
6324
6325 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
6326
6327 /* Down the device without resetting the hardware */
6328 e1000e_down(adapter, false);
6329 }
6247 6330
6248 adapter->idle_check = !dev->power.runtime_auto; 6331 if (__e1000_shutdown(pdev, true)) {
6249 return __e1000_resume(pdev); 6332 e1000e_pm_runtime_resume(dev);
6333 return -EBUSY;
6334 }
6335
6336 return 0;
6250} 6337}
6251#endif /* CONFIG_PM_RUNTIME */ 6338#endif /* CONFIG_PM_RUNTIME */
6252#endif /* CONFIG_PM */ 6339#endif /* CONFIG_PM */
6253 6340
6254static void e1000_shutdown(struct pci_dev *pdev) 6341static void e1000_shutdown(struct pci_dev *pdev)
6255{ 6342{
6343 e1000e_pm_freeze(&pdev->dev);
6344
6256 __e1000_shutdown(pdev, false); 6345 __e1000_shutdown(pdev, false);
6257} 6346}
6258 6347
@@ -6338,7 +6427,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6338 return PCI_ERS_RESULT_DISCONNECT; 6427 return PCI_ERS_RESULT_DISCONNECT;
6339 6428
6340 if (netif_running(netdev)) 6429 if (netif_running(netdev))
6341 e1000e_down(adapter); 6430 e1000e_down(adapter, true);
6342 pci_disable_device(pdev); 6431 pci_disable_device(pdev);
6343 6432
6344 /* Request a slot slot reset. */ 6433 /* Request a slot slot reset. */
@@ -6350,7 +6439,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6350 * @pdev: Pointer to PCI device 6439 * @pdev: Pointer to PCI device
6351 * 6440 *
6352 * Restart the card from scratch, as if from a cold-boot. Implementation 6441 * Restart the card from scratch, as if from a cold-boot. Implementation
6353 * resembles the first-half of the e1000_resume routine. 6442 * resembles the first-half of the e1000e_pm_resume routine.
6354 */ 6443 */
6355static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 6444static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6356{ 6445{
@@ -6397,7 +6486,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6397 * 6486 *
6398 * This callback is called when the error recovery driver tells us that 6487 * This callback is called when the error recovery driver tells us that
6399 * its OK to resume normal operation. Implementation resembles the 6488 * its OK to resume normal operation. Implementation resembles the
6400 * second-half of the e1000_resume routine. 6489 * second-half of the e1000e_pm_resume routine.
6401 */ 6490 */
6402static void e1000_io_resume(struct pci_dev *pdev) 6491static void e1000_io_resume(struct pci_dev *pdev)
6403{ 6492{
@@ -6902,9 +6991,6 @@ static void e1000_remove(struct pci_dev *pdev)
6902 } 6991 }
6903 } 6992 }
6904 6993
6905 if (!(netdev->flags & IFF_UP))
6906 e1000_power_down_phy(adapter);
6907
6908 /* Don't lie to e1000_close() down the road. */ 6994 /* Don't lie to e1000_close() down the road. */
6909 if (!down) 6995 if (!down)
6910 clear_bit(__E1000_DOWN, &adapter->state); 6996 clear_bit(__E1000_DOWN, &adapter->state);
@@ -7026,9 +7112,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
7026MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 7112MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
7027 7113
7028static const struct dev_pm_ops e1000_pm_ops = { 7114static const struct dev_pm_ops e1000_pm_ops = {
7029 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 7115#ifdef CONFIG_PM_SLEEP
7030 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, 7116 .suspend = e1000e_pm_suspend,
7031 e1000_idle) 7117 .resume = e1000e_pm_resume,
7118 .freeze = e1000e_pm_freeze,
7119 .thaw = e1000e_pm_thaw,
7120 .poweroff = e1000e_pm_suspend,
7121 .restore = e1000e_pm_resume,
7122#endif
7123 SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
7124 e1000e_pm_runtime_idle)
7032}; 7125};
7033 7126
7034/* PCI Device API Driver */ 7127/* PCI Device API Driver */
@@ -7055,7 +7148,7 @@ static int __init e1000_init_module(void)
7055 int ret; 7148 int ret;
7056 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 7149 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7057 e1000e_driver_version); 7150 e1000e_driver_version);
7058 pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n"); 7151 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
7059 ret = pci_register_driver(&e1000_driver); 7152 ret = pci_register_driver(&e1000_driver);
7060 7153
7061 return ret; 7154 return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index d70a03906ac0..a9a976f04bff 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 45fc69561627..342bf69efab5 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_NVM_H_ 22#ifndef _E1000E_NVM_H_
30#define _E1000E_NVM_H_ 23#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index c16bd75b6caa..d0ac0f3249c8 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include <linux/netdevice.h> 22#include <linux/netdevice.h>
30#include <linux/module.h> 23#include <linux/module.h>
@@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter)
381 "%s set to dynamic mode\n", opt.name); 374 "%s set to dynamic mode\n", opt.name);
382 adapter->itr = 20000; 375 adapter->itr = 20000;
383 break; 376 break;
377 case 2:
378 dev_info(&adapter->pdev->dev,
379 "%s Invalid mode - setting default\n",
380 opt.name);
381 adapter->itr_setting = opt.def;
382 /* fall-through */
384 case 3: 383 case 3:
385 dev_info(&adapter->pdev->dev, 384 dev_info(&adapter->pdev->dev,
386 "%s set to dynamic conservative mode\n", 385 "%s set to dynamic conservative mode\n",
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 20e71f4ca426..00b3fc98bf30 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index f4f71b9991e3..3841bccf058c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_PHY_H_ 22#ifndef _E1000E_PHY_H_
30#define _E1000E_PHY_H_ 23#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 065f8c80d4f2..fb1a914a3ad4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* PTP 1588 Hardware Clock (PHC) 22/* PTP 1588 Hardware Clock (PHC)
30 * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) 23 * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
@@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
47 ptp_clock_info); 40 ptp_clock_info);
48 struct e1000_hw *hw = &adapter->hw; 41 struct e1000_hw *hw = &adapter->hw;
49 bool neg_adj = false; 42 bool neg_adj = false;
43 unsigned long flags;
50 u64 adjustment; 44 u64 adjustment;
51 u32 timinca, incvalue; 45 u32 timinca, incvalue;
52 s32 ret_val; 46 s32 ret_val;
@@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
64 if (ret_val) 58 if (ret_val)
65 return ret_val; 59 return ret_val;
66 60
61 spin_lock_irqsave(&adapter->systim_lock, flags);
62
67 incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; 63 incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
68 64
69 adjustment = incvalue; 65 adjustment = incvalue;
@@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
77 73
78 ew32(TIMINCA, timinca); 74 ew32(TIMINCA, timinca);
79 75
76 spin_unlock_irqrestore(&adapter->systim_lock, flags);
77
80 return 0; 78 return 0;
81} 79}
82 80
@@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
191 .n_alarm = 0, 189 .n_alarm = 0,
192 .n_ext_ts = 0, 190 .n_ext_ts = 0,
193 .n_per_out = 0, 191 .n_per_out = 0,
192 .n_pins = 0,
194 .pps = 0, 193 .pps = 0,
195 .adjfreq = e1000e_phc_adjfreq, 194 .adjfreq = e1000e_phc_adjfreq,
196 .adjtime = e1000e_phc_adjtime, 195 .adjtime = e1000e_phc_adjtime,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index a7e6a3e37257..ea235bbe50d3 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_REGS_H_ 22#ifndef _E1000E_REGS_H_
30#define _E1000E_REGS_H_ 23#define _E1000E_REGS_H_
@@ -39,6 +32,7 @@
39#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 32#define E1000_SCTL 0x00024 /* SerDes Control - RW */
40#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 33#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
41#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 34#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
35#define E1000_FEXT 0x0002C /* Future Extended - RW */
42#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ 36#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
43#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ 37#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
44#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ 38#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 72dae4d97b43..beb7b4393a6c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -86,12 +86,12 @@
86 86
87#define I40E_NVM_VERSION_LO_SHIFT 0 87#define I40E_NVM_VERSION_LO_SHIFT 0
88#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) 88#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
89#define I40E_NVM_VERSION_HI_SHIFT 8 89#define I40E_NVM_VERSION_HI_SHIFT 12
90#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT) 90#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
91 91
92/* The values in here are decimal coded as hex as is the case in the NVM map*/ 92/* The values in here are decimal coded as hex as is the case in the NVM map*/
93#define I40E_CURRENT_NVM_VERSION_HI 0x2 93#define I40E_CURRENT_NVM_VERSION_HI 0x2
94#define I40E_CURRENT_NVM_VERSION_LO 0x30 94#define I40E_CURRENT_NVM_VERSION_LO 0x40
95 95
96/* magic for getting defines into strings */ 96/* magic for getting defines into strings */
97#define STRINGIFY(foo) #foo 97#define STRINGIFY(foo) #foo
@@ -136,6 +136,7 @@ enum i40e_state_t {
136 __I40E_EMP_RESET_REQUESTED, 136 __I40E_EMP_RESET_REQUESTED,
137 __I40E_FILTER_OVERFLOW_PROMISC, 137 __I40E_FILTER_OVERFLOW_PROMISC,
138 __I40E_SUSPENDED, 138 __I40E_SUSPENDED,
139 __I40E_BAD_EEPROM,
139}; 140};
140 141
141enum i40e_interrupt_policy { 142enum i40e_interrupt_policy {
@@ -152,8 +153,21 @@ struct i40e_lump_tracking {
152}; 153};
153 154
154#define I40E_DEFAULT_ATR_SAMPLE_RATE 20 155#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
155#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512 156#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
156struct i40e_fdir_data { 157#define I40E_FDIR_BUFFER_FULL_MARGIN 10
158#define I40E_FDIR_BUFFER_HEAD_ROOM 200
159
160struct i40e_fdir_filter {
161 struct hlist_node fdir_node;
162 /* filter ipnut set */
163 u8 flow_type;
164 u8 ip4_proto;
165 __be32 dst_ip[4];
166 __be32 src_ip[4];
167 __be16 src_port;
168 __be16 dst_port;
169 __be32 sctp_v_tag;
170 /* filter control */
157 u16 q_index; 171 u16 q_index;
158 u8 flex_off; 172 u8 flex_off;
159 u8 pctype; 173 u8 pctype;
@@ -162,7 +176,6 @@ struct i40e_fdir_data {
162 u8 fd_status; 176 u8 fd_status;
163 u16 cnt_index; 177 u16 cnt_index;
164 u32 fd_id; 178 u32 fd_id;
165 u8 *raw_packet;
166}; 179};
167 180
168#define I40E_ETH_P_LLDP 0x88cc 181#define I40E_ETH_P_LLDP 0x88cc
@@ -196,7 +209,7 @@ struct i40e_pf {
196 bool fc_autoneg_status; 209 bool fc_autoneg_status;
197 210
198 u16 eeprom_version; 211 u16 eeprom_version;
199 u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */ 212 u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
200 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ 213 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
201 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ 214 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
202 u16 num_req_vfs; /* num vfs requested for this vf */ 215 u16 num_req_vfs; /* num vfs requested for this vf */
@@ -210,6 +223,9 @@ struct i40e_pf {
210 u8 atr_sample_rate; 223 u8 atr_sample_rate;
211 bool wol_en; 224 bool wol_en;
212 225
226 struct hlist_head fdir_filter_list;
227 u16 fdir_pf_active_filters;
228
213#ifdef CONFIG_I40E_VXLAN 229#ifdef CONFIG_I40E_VXLAN
214 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; 230 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
215 u16 pending_vxlan_bitmap; 231 u16 pending_vxlan_bitmap;
@@ -251,6 +267,9 @@ struct i40e_pf {
251#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) 267#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
252#endif 268#endif
253 269
270 /* tracks features that get auto disabled by errors */
271 u64 auto_disable_flags;
272
254 bool stat_offsets_loaded; 273 bool stat_offsets_loaded;
255 struct i40e_hw_port_stats stats; 274 struct i40e_hw_port_stats stats;
256 struct i40e_hw_port_stats stats_offsets; 275 struct i40e_hw_port_stats stats_offsets;
@@ -477,10 +496,10 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
477 "f%d.%d a%d.%d n%02x.%02x e%08x", 496 "f%d.%d a%d.%d n%02x.%02x e%08x",
478 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, 497 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
479 hw->aq.api_maj_ver, hw->aq.api_min_ver, 498 hw->aq.api_maj_ver, hw->aq.api_min_ver,
480 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) 499 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
481 >> I40E_NVM_VERSION_HI_SHIFT, 500 I40E_NVM_VERSION_HI_SHIFT,
482 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) 501 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
483 >> I40E_NVM_VERSION_LO_SHIFT, 502 I40E_NVM_VERSION_LO_SHIFT,
484 hw->nvm.eetrack); 503 hw->nvm.eetrack);
485 504
486 return buf; 505 return buf;
@@ -534,9 +553,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
534int i40e_fetch_switch_configuration(struct i40e_pf *pf, 553int i40e_fetch_switch_configuration(struct i40e_pf *pf,
535 bool printconfig); 554 bool printconfig);
536 555
537int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, 556int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
538 struct i40e_pf *pf, bool add); 557 struct i40e_pf *pf, bool add);
539 558int i40e_add_del_fdir(struct i40e_vsi *vsi,
559 struct i40e_fdir_filter *input, bool add);
560void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
561int i40e_get_current_fd_count(struct i40e_pf *pf);
562bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
540void i40e_set_ethtool_ops(struct net_device *netdev); 563void i40e_set_ethtool_ops(struct net_device *netdev);
541struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, 564struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
542 u8 *macaddr, s16 vlan, 565 u8 *macaddr, s16 vlan,
@@ -575,6 +598,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
575void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); 598void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
576void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); 599void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
577int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 600int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
601int i40e_vsi_open(struct i40e_vsi *vsi);
578void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 602void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
579int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 603int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
580int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); 604int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index a50e6b3479ae..ed3902bf249b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
647 desc_cb = *desc; 647 desc_cb = *desc;
648 cb_func(hw, &desc_cb); 648 cb_func(hw, &desc_cb);
649 } 649 }
650 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 650 memset(desc, 0, sizeof(*desc));
651 memset((void *)details, 0, 651 memset(details, 0, sizeof(*details));
652 sizeof(struct i40e_asq_cmd_details));
653 ntc++; 652 ntc++;
654 if (ntc == asq->count) 653 if (ntc == asq->count)
655 ntc = 0; 654 ntc = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e7f38b57834d..922cdcc45c54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -162,6 +162,372 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
162 return status; 162 return status;
163} 163}
164 164
165/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
166 * hardware to a bit-field that can be used by SW to more easily determine the
167 * packet type.
168 *
169 * Macros are used to shorten the table lines and make this table human
170 * readable.
171 *
172 * We store the PTYPE in the top byte of the bit field - this is just so that
173 * we can check that the table doesn't have a row missing, as the index into
174 * the table should be the PTYPE.
175 *
176 * Typical work flow:
177 *
178 * IF NOT i40e_ptype_lookup[ptype].known
179 * THEN
180 * Packet is unknown
181 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
182 * Use the rest of the fields to look at the tunnels, inner protocols, etc
183 * ELSE
184 * Use the enum i40e_rx_l2_ptype to decode the packet type
185 * ENDIF
186 */
187
188/* macro to make the table lines short */
189#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
190 { PTYPE, \
191 1, \
192 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
193 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
194 I40E_RX_PTYPE_##OUTER_FRAG, \
195 I40E_RX_PTYPE_TUNNEL_##T, \
196 I40E_RX_PTYPE_TUNNEL_END_##TE, \
197 I40E_RX_PTYPE_##TEF, \
198 I40E_RX_PTYPE_INNER_PROT_##I, \
199 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
200
201#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
202 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
203
204/* shorter macros makes the table fit but are terse */
205#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
206#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
207#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
208
209/* Lookup table mapping the HW PTYPE to the bit field for decoding */
210struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
211 /* L2 Packet types */
212 I40E_PTT_UNUSED_ENTRY(0),
213 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
214 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
215 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
216 I40E_PTT_UNUSED_ENTRY(4),
217 I40E_PTT_UNUSED_ENTRY(5),
218 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
219 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
220 I40E_PTT_UNUSED_ENTRY(8),
221 I40E_PTT_UNUSED_ENTRY(9),
222 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
223 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
224 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
225 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
226 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
227 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
228 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
229 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
230 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
231 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
232 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
233 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
234
235 /* Non Tunneled IPv4 */
236 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
237 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
238 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
239 I40E_PTT_UNUSED_ENTRY(25),
240 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
241 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
242 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
243
244 /* IPv4 --> IPv4 */
245 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
246 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
247 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
248 I40E_PTT_UNUSED_ENTRY(32),
249 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
250 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
251 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
252
253 /* IPv4 --> IPv6 */
254 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
255 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
256 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
257 I40E_PTT_UNUSED_ENTRY(39),
258 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
259 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
260 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
261
262 /* IPv4 --> GRE/NAT */
263 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
264
265 /* IPv4 --> GRE/NAT --> IPv4 */
266 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
267 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
268 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
269 I40E_PTT_UNUSED_ENTRY(47),
270 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
271 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
272 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
273
274 /* IPv4 --> GRE/NAT --> IPv6 */
275 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
276 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
277 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
278 I40E_PTT_UNUSED_ENTRY(54),
279 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
280 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
281 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
282
283 /* IPv4 --> GRE/NAT --> MAC */
284 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
285
286 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
287 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
288 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
289 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
290 I40E_PTT_UNUSED_ENTRY(62),
291 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
292 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
293 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
294
295 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
296 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
297 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
298 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
299 I40E_PTT_UNUSED_ENTRY(69),
300 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
301 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
302 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
303
304 /* IPv4 --> GRE/NAT --> MAC/VLAN */
305 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
306
307 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
308 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
309 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
310 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
311 I40E_PTT_UNUSED_ENTRY(77),
312 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
313 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
314 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
315
316 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
317 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
318 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
319 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
320 I40E_PTT_UNUSED_ENTRY(84),
321 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
322 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
323 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
324
325 /* Non Tunneled IPv6 */
326 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
327 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
328 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
329 I40E_PTT_UNUSED_ENTRY(91),
330 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
331 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
332 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
333
334 /* IPv6 --> IPv4 */
335 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
336 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
337 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
338 I40E_PTT_UNUSED_ENTRY(98),
339 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
340 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
341 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
342
343 /* IPv6 --> IPv6 */
344 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
345 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
346 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
347 I40E_PTT_UNUSED_ENTRY(105),
348 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
349 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
350 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
351
352 /* IPv6 --> GRE/NAT */
353 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
354
355 /* IPv6 --> GRE/NAT -> IPv4 */
356 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
357 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
358 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
359 I40E_PTT_UNUSED_ENTRY(113),
360 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
361 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
362 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
363
364 /* IPv6 --> GRE/NAT -> IPv6 */
365 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
366 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
367 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
368 I40E_PTT_UNUSED_ENTRY(120),
369 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
370 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
371 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
372
373 /* IPv6 --> GRE/NAT -> MAC */
374 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
375
376 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
377 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
378 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
379 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
380 I40E_PTT_UNUSED_ENTRY(128),
381 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
382 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
383 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
384
385 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
386 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
387 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
388 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
389 I40E_PTT_UNUSED_ENTRY(135),
390 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
391 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
392 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
393
394 /* IPv6 --> GRE/NAT -> MAC/VLAN */
395 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
396
397 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
398 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
399 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
400 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
401 I40E_PTT_UNUSED_ENTRY(143),
402 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
403 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
404 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
405
406 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
407 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
408 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
409 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
410 I40E_PTT_UNUSED_ENTRY(150),
411 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
412 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
413 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
414
415 /* unused entries */
416 I40E_PTT_UNUSED_ENTRY(154),
417 I40E_PTT_UNUSED_ENTRY(155),
418 I40E_PTT_UNUSED_ENTRY(156),
419 I40E_PTT_UNUSED_ENTRY(157),
420 I40E_PTT_UNUSED_ENTRY(158),
421 I40E_PTT_UNUSED_ENTRY(159),
422
423 I40E_PTT_UNUSED_ENTRY(160),
424 I40E_PTT_UNUSED_ENTRY(161),
425 I40E_PTT_UNUSED_ENTRY(162),
426 I40E_PTT_UNUSED_ENTRY(163),
427 I40E_PTT_UNUSED_ENTRY(164),
428 I40E_PTT_UNUSED_ENTRY(165),
429 I40E_PTT_UNUSED_ENTRY(166),
430 I40E_PTT_UNUSED_ENTRY(167),
431 I40E_PTT_UNUSED_ENTRY(168),
432 I40E_PTT_UNUSED_ENTRY(169),
433
434 I40E_PTT_UNUSED_ENTRY(170),
435 I40E_PTT_UNUSED_ENTRY(171),
436 I40E_PTT_UNUSED_ENTRY(172),
437 I40E_PTT_UNUSED_ENTRY(173),
438 I40E_PTT_UNUSED_ENTRY(174),
439 I40E_PTT_UNUSED_ENTRY(175),
440 I40E_PTT_UNUSED_ENTRY(176),
441 I40E_PTT_UNUSED_ENTRY(177),
442 I40E_PTT_UNUSED_ENTRY(178),
443 I40E_PTT_UNUSED_ENTRY(179),
444
445 I40E_PTT_UNUSED_ENTRY(180),
446 I40E_PTT_UNUSED_ENTRY(181),
447 I40E_PTT_UNUSED_ENTRY(182),
448 I40E_PTT_UNUSED_ENTRY(183),
449 I40E_PTT_UNUSED_ENTRY(184),
450 I40E_PTT_UNUSED_ENTRY(185),
451 I40E_PTT_UNUSED_ENTRY(186),
452 I40E_PTT_UNUSED_ENTRY(187),
453 I40E_PTT_UNUSED_ENTRY(188),
454 I40E_PTT_UNUSED_ENTRY(189),
455
456 I40E_PTT_UNUSED_ENTRY(190),
457 I40E_PTT_UNUSED_ENTRY(191),
458 I40E_PTT_UNUSED_ENTRY(192),
459 I40E_PTT_UNUSED_ENTRY(193),
460 I40E_PTT_UNUSED_ENTRY(194),
461 I40E_PTT_UNUSED_ENTRY(195),
462 I40E_PTT_UNUSED_ENTRY(196),
463 I40E_PTT_UNUSED_ENTRY(197),
464 I40E_PTT_UNUSED_ENTRY(198),
465 I40E_PTT_UNUSED_ENTRY(199),
466
467 I40E_PTT_UNUSED_ENTRY(200),
468 I40E_PTT_UNUSED_ENTRY(201),
469 I40E_PTT_UNUSED_ENTRY(202),
470 I40E_PTT_UNUSED_ENTRY(203),
471 I40E_PTT_UNUSED_ENTRY(204),
472 I40E_PTT_UNUSED_ENTRY(205),
473 I40E_PTT_UNUSED_ENTRY(206),
474 I40E_PTT_UNUSED_ENTRY(207),
475 I40E_PTT_UNUSED_ENTRY(208),
476 I40E_PTT_UNUSED_ENTRY(209),
477
478 I40E_PTT_UNUSED_ENTRY(210),
479 I40E_PTT_UNUSED_ENTRY(211),
480 I40E_PTT_UNUSED_ENTRY(212),
481 I40E_PTT_UNUSED_ENTRY(213),
482 I40E_PTT_UNUSED_ENTRY(214),
483 I40E_PTT_UNUSED_ENTRY(215),
484 I40E_PTT_UNUSED_ENTRY(216),
485 I40E_PTT_UNUSED_ENTRY(217),
486 I40E_PTT_UNUSED_ENTRY(218),
487 I40E_PTT_UNUSED_ENTRY(219),
488
489 I40E_PTT_UNUSED_ENTRY(220),
490 I40E_PTT_UNUSED_ENTRY(221),
491 I40E_PTT_UNUSED_ENTRY(222),
492 I40E_PTT_UNUSED_ENTRY(223),
493 I40E_PTT_UNUSED_ENTRY(224),
494 I40E_PTT_UNUSED_ENTRY(225),
495 I40E_PTT_UNUSED_ENTRY(226),
496 I40E_PTT_UNUSED_ENTRY(227),
497 I40E_PTT_UNUSED_ENTRY(228),
498 I40E_PTT_UNUSED_ENTRY(229),
499
500 I40E_PTT_UNUSED_ENTRY(230),
501 I40E_PTT_UNUSED_ENTRY(231),
502 I40E_PTT_UNUSED_ENTRY(232),
503 I40E_PTT_UNUSED_ENTRY(233),
504 I40E_PTT_UNUSED_ENTRY(234),
505 I40E_PTT_UNUSED_ENTRY(235),
506 I40E_PTT_UNUSED_ENTRY(236),
507 I40E_PTT_UNUSED_ENTRY(237),
508 I40E_PTT_UNUSED_ENTRY(238),
509 I40E_PTT_UNUSED_ENTRY(239),
510
511 I40E_PTT_UNUSED_ENTRY(240),
512 I40E_PTT_UNUSED_ENTRY(241),
513 I40E_PTT_UNUSED_ENTRY(242),
514 I40E_PTT_UNUSED_ENTRY(243),
515 I40E_PTT_UNUSED_ENTRY(244),
516 I40E_PTT_UNUSED_ENTRY(245),
517 I40E_PTT_UNUSED_ENTRY(246),
518 I40E_PTT_UNUSED_ENTRY(247),
519 I40E_PTT_UNUSED_ENTRY(248),
520 I40E_PTT_UNUSED_ENTRY(249),
521
522 I40E_PTT_UNUSED_ENTRY(250),
523 I40E_PTT_UNUSED_ENTRY(251),
524 I40E_PTT_UNUSED_ENTRY(252),
525 I40E_PTT_UNUSED_ENTRY(253),
526 I40E_PTT_UNUSED_ENTRY(254),
527 I40E_PTT_UNUSED_ENTRY(255)
528};
529
530
165/** 531/**
166 * i40e_init_shared_code - Initialize the shared code 532 * i40e_init_shared_code - Initialize the shared code
167 * @hw: pointer to hardware structure 533 * @hw: pointer to hardware structure
@@ -1409,9 +1775,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
1409 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 1775 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
1410 1776
1411 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 1777 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
1412 p = (struct i40e_hw_capabilities *)&hw->dev_caps; 1778 p = &hw->dev_caps;
1413 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 1779 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
1414 p = (struct i40e_hw_capabilities *)&hw->func_caps; 1780 p = &hw->func_caps;
1415 else 1781 else
1416 return; 1782 return;
1417 1783
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 50730141bb7b..036570d76176 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -332,6 +332,7 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
332 u16 type; 332 u16 type;
333 u16 length; 333 u16 length;
334 u16 typelength; 334 u16 typelength;
335 u16 offset = 0;
335 336
336 if (!lldpmib || !dcbcfg) 337 if (!lldpmib || !dcbcfg)
337 return I40E_ERR_PARAM; 338 return I40E_ERR_PARAM;
@@ -339,15 +340,17 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
339 /* set to the start of LLDPDU */ 340 /* set to the start of LLDPDU */
340 lldpmib += ETH_HLEN; 341 lldpmib += ETH_HLEN;
341 tlv = (struct i40e_lldp_org_tlv *)lldpmib; 342 tlv = (struct i40e_lldp_org_tlv *)lldpmib;
342 while (tlv) { 343 while (1) {
343 typelength = ntohs(tlv->typelength); 344 typelength = ntohs(tlv->typelength);
344 type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> 345 type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
345 I40E_LLDP_TLV_TYPE_SHIFT); 346 I40E_LLDP_TLV_TYPE_SHIFT);
346 length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> 347 length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
347 I40E_LLDP_TLV_LEN_SHIFT); 348 I40E_LLDP_TLV_LEN_SHIFT);
349 offset += sizeof(typelength) + length;
348 350
349 if (type == I40E_TLV_TYPE_END) 351 /* END TLV or beyond LLDPDU size */
350 break;/* END TLV break out */ 352 if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
353 break;
351 354
352 switch (type) { 355 switch (type) {
353 case I40E_TLV_TYPE_ORG: 356 case I40E_TLV_TYPE_ORG:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da22c3fa2c00..3c37386fd138 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
1011 **/ 1011 **/
1012static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) 1012static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
1013{ 1013{
1014 if (enable) 1014 if (enable) {
1015 pf->flags |= flag; 1015 pf->flags |= flag;
1016 else 1016 } else {
1017 pf->flags &= ~flag; 1017 pf->flags &= ~flag;
1018 pf->auto_disable_flags |= flag;
1019 }
1018 dev_info(&pf->pdev->dev, "requesting a pf reset\n"); 1020 dev_info(&pf->pdev->dev, "requesting a pf reset\n");
1019 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); 1021 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
1020} 1022}
@@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1467 pf->msg_enable); 1469 pf->msg_enable);
1468 } 1470 }
1469 } else if (strncmp(cmd_buf, "pfr", 3) == 0) { 1471 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1470 dev_info(&pf->pdev->dev, "forcing PFR\n"); 1472 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1471 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); 1473 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
1472 1474
1473 } else if (strncmp(cmd_buf, "corer", 5) == 0) { 1475 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1474 dev_info(&pf->pdev->dev, "forcing CoreR\n"); 1476 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1475 i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); 1477 i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
1476 1478
1477 } else if (strncmp(cmd_buf, "globr", 5) == 0) { 1479 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1478 dev_info(&pf->pdev->dev, "forcing GlobR\n"); 1480 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1479 i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); 1481 i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
1480 1482
1481 } else if (strncmp(cmd_buf, "empr", 4) == 0) { 1483 } else if (strncmp(cmd_buf, "empr", 4) == 0) {
1482 dev_info(&pf->pdev->dev, "forcing EMPR\n"); 1484 dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
1483 i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); 1485 i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
1484 1486
1485 } else if (strncmp(cmd_buf, "read", 4) == 0) { 1487 } else if (strncmp(cmd_buf, "read", 4) == 0) {
@@ -1663,28 +1665,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1663 desc = NULL; 1665 desc = NULL;
1664 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || 1666 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
1665 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { 1667 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
1666 struct i40e_fdir_data fd_data; 1668 struct i40e_fdir_filter fd_data;
1667 u16 packet_len, i, j = 0; 1669 u16 packet_len, i, j = 0;
1668 char *asc_packet; 1670 char *asc_packet;
1671 u8 *raw_packet;
1669 bool add = false; 1672 bool add = false;
1670 int ret; 1673 int ret;
1671 1674
1672 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 1675 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
1676 goto command_write_done;
1677
1678 if (strncmp(cmd_buf, "add", 3) == 0)
1679 add = true;
1680
1681 if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1682 goto command_write_done;
1683
1684 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
1673 GFP_KERNEL); 1685 GFP_KERNEL);
1674 if (!asc_packet) 1686 if (!asc_packet)
1675 goto command_write_done; 1687 goto command_write_done;
1676 1688
1677 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 1689 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
1678 GFP_KERNEL); 1690 GFP_KERNEL);
1679 1691
1680 if (!fd_data.raw_packet) { 1692 if (!raw_packet) {
1681 kfree(asc_packet); 1693 kfree(asc_packet);
1682 asc_packet = NULL; 1694 asc_packet = NULL;
1683 goto command_write_done; 1695 goto command_write_done;
1684 } 1696 }
1685 1697
1686 if (strncmp(cmd_buf, "add", 3) == 0)
1687 add = true;
1688 cnt = sscanf(&cmd_buf[13], 1698 cnt = sscanf(&cmd_buf[13],
1689 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", 1699 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
1690 &fd_data.q_index, 1700 &fd_data.q_index,
@@ -1698,36 +1708,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1698 cnt); 1708 cnt);
1699 kfree(asc_packet); 1709 kfree(asc_packet);
1700 asc_packet = NULL; 1710 asc_packet = NULL;
1701 kfree(fd_data.raw_packet); 1711 kfree(raw_packet);
1702 goto command_write_done; 1712 goto command_write_done;
1703 } 1713 }
1704 1714
1705 /* fix packet length if user entered 0 */ 1715 /* fix packet length if user entered 0 */
1706 if (packet_len == 0) 1716 if (packet_len == 0)
1707 packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP; 1717 packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
1708 1718
1709 /* make sure to check the max as well */ 1719 /* make sure to check the max as well */
1710 packet_len = min_t(u16, 1720 packet_len = min_t(u16,
1711 packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); 1721 packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
1712 1722
1713 for (i = 0; i < packet_len; i++) { 1723 for (i = 0; i < packet_len; i++) {
1714 sscanf(&asc_packet[j], "%2hhx ", 1724 sscanf(&asc_packet[j], "%2hhx ",
1715 &fd_data.raw_packet[i]); 1725 &raw_packet[i]);
1716 j += 3; 1726 j += 3;
1717 } 1727 }
1718 dev_info(&pf->pdev->dev, "FD raw packet dump\n"); 1728 dev_info(&pf->pdev->dev, "FD raw packet dump\n");
1719 print_hex_dump(KERN_INFO, "FD raw packet: ", 1729 print_hex_dump(KERN_INFO, "FD raw packet: ",
1720 DUMP_PREFIX_OFFSET, 16, 1, 1730 DUMP_PREFIX_OFFSET, 16, 1,
1721 fd_data.raw_packet, packet_len, true); 1731 raw_packet, packet_len, true);
1722 ret = i40e_program_fdir_filter(&fd_data, pf, add); 1732 ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
1723 if (!ret) { 1733 if (!ret) {
1724 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); 1734 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
1725 } else { 1735 } else {
1726 dev_info(&pf->pdev->dev, 1736 dev_info(&pf->pdev->dev,
1727 "Filter command send failed %d\n", ret); 1737 "Filter command send failed %d\n", ret);
1728 } 1738 }
1729 kfree(fd_data.raw_packet); 1739 kfree(raw_packet);
1730 fd_data.raw_packet = NULL; 1740 raw_packet = NULL;
1731 kfree(asc_packet); 1741 kfree(asc_packet);
1732 asc_packet = NULL; 1742 asc_packet = NULL;
1733 } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { 1743 } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
@@ -2077,9 +2087,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
2077 if (!vsi) { 2087 if (!vsi) {
2078 dev_info(&pf->pdev->dev, 2088 dev_info(&pf->pdev->dev,
2079 "tx_timeout: VSI %d not found\n", vsi_seid); 2089 "tx_timeout: VSI %d not found\n", vsi_seid);
2080 goto netdev_ops_write_done; 2090 } else if (!vsi->netdev) {
2081 } 2091 dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
2082 if (rtnl_trylock()) { 2092 vsi_seid);
2093 } else if (test_bit(__I40E_DOWN, &vsi->state)) {
2094 dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
2095 vsi_seid);
2096 } else if (rtnl_trylock()) {
2083 vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); 2097 vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
2084 rtnl_unlock(); 2098 rtnl_unlock();
2085 dev_info(&pf->pdev->dev, "tx_timeout called\n"); 2099 dev_info(&pf->pdev->dev, "tx_timeout called\n");
@@ -2098,9 +2112,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
2098 if (!vsi) { 2112 if (!vsi) {
2099 dev_info(&pf->pdev->dev, 2113 dev_info(&pf->pdev->dev,
2100 "change_mtu: VSI %d not found\n", vsi_seid); 2114 "change_mtu: VSI %d not found\n", vsi_seid);
2101 goto netdev_ops_write_done; 2115 } else if (!vsi->netdev) {
2102 } 2116 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
2103 if (rtnl_trylock()) { 2117 vsi_seid);
2118 } else if (rtnl_trylock()) {
2104 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, 2119 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
2105 mtu); 2120 mtu);
2106 rtnl_unlock(); 2121 rtnl_unlock();
@@ -2119,9 +2134,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
2119 if (!vsi) { 2134 if (!vsi) {
2120 dev_info(&pf->pdev->dev, 2135 dev_info(&pf->pdev->dev,
2121 "set_rx_mode: VSI %d not found\n", vsi_seid); 2136 "set_rx_mode: VSI %d not found\n", vsi_seid);
2122 goto netdev_ops_write_done; 2137 } else if (!vsi->netdev) {
2123 } 2138 dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
2124 if (rtnl_trylock()) { 2139 vsi_seid);
2140 } else if (rtnl_trylock()) {
2125 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); 2141 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
2126 rtnl_unlock(); 2142 rtnl_unlock();
2127 dev_info(&pf->pdev->dev, "set_rx_mode called\n"); 2143 dev_info(&pf->pdev->dev, "set_rx_mode called\n");
@@ -2139,11 +2155,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
2139 if (!vsi) { 2155 if (!vsi) {
2140 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", 2156 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
2141 vsi_seid); 2157 vsi_seid);
2142 goto netdev_ops_write_done; 2158 } else if (!vsi->netdev) {
2159 dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
2160 vsi_seid);
2161 } else {
2162 for (i = 0; i < vsi->num_q_vectors; i++)
2163 napi_schedule(&vsi->q_vectors[i]->napi);
2164 dev_info(&pf->pdev->dev, "napi called\n");
2143 } 2165 }
2144 for (i = 0; i < vsi->num_q_vectors; i++)
2145 napi_schedule(&vsi->q_vectors[i]->napi);
2146 dev_info(&pf->pdev->dev, "napi called\n");
2147 } else { 2166 } else {
2148 dev_info(&pf->pdev->dev, "unknown command '%s'\n", 2167 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
2149 i40e_dbg_netdev_ops_buf); 2168 i40e_dbg_netdev_ops_buf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b1d7d8c5cb9b..03d99cbc5c25 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -62,6 +62,9 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
62 I40E_NETDEV_STAT(rx_crc_errors), 62 I40E_NETDEV_STAT(rx_crc_errors),
63}; 63};
64 64
65static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
66 struct ethtool_rxnfc *cmd);
67
65/* These PF_STATs might look like duplicates of some NETDEV_STATs, 68/* These PF_STATs might look like duplicates of some NETDEV_STATs,
66 * but they are separate. This device supports Virtualization, and 69 * but they are separate. This device supports Virtualization, and
67 * as such might have several netdevs supporting VMDq and FCoE going 70 * as such might have several netdevs supporting VMDq and FCoE going
@@ -84,6 +87,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
84 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), 87 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
85 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), 88 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
86 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 89 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
90 I40E_PF_STAT("tx_timeout", tx_timeout_count),
87 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), 91 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
88 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), 92 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
89 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 93 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -110,6 +114,11 @@ static struct i40e_stats i40e_gstrings_stats[] = {
110 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), 114 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
111 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), 115 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
112 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), 116 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
117 /* LPI stats */
118 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
119 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
120 I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
121 I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
113}; 122};
114 123
115#define I40E_QUEUE_STATS_LEN(n) \ 124#define I40E_QUEUE_STATS_LEN(n) \
@@ -387,7 +396,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
387 ret_val = i40e_aq_read_nvm(hw, 0x0, 396 ret_val = i40e_aq_read_nvm(hw, 0x0,
388 eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), 397 eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
389 len, 398 len,
390 (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), 399 eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
391 last, NULL); 400 last, NULL);
392 if (ret_val) { 401 if (ret_val) {
393 dev_info(&pf->pdev->dev, 402 dev_info(&pf->pdev->dev,
@@ -399,7 +408,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
399 408
400release_nvm: 409release_nvm:
401 i40e_release_nvm(hw); 410 i40e_release_nvm(hw);
402 memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); 411 memcpy(bytes, eeprom_buff, eeprom->len);
403free_buff: 412free_buff:
404 kfree(eeprom_buff); 413 kfree(eeprom_buff);
405 return ret_val; 414 return ret_val;
@@ -649,18 +658,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
649 658
650 /* process Tx ring statistics */ 659 /* process Tx ring statistics */
651 do { 660 do {
652 start = u64_stats_fetch_begin_bh(&tx_ring->syncp); 661 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
653 data[i] = tx_ring->stats.packets; 662 data[i] = tx_ring->stats.packets;
654 data[i + 1] = tx_ring->stats.bytes; 663 data[i + 1] = tx_ring->stats.bytes;
655 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); 664 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
656 665
657 /* Rx ring is the 2nd half of the queue pair */ 666 /* Rx ring is the 2nd half of the queue pair */
658 rx_ring = &tx_ring[1]; 667 rx_ring = &tx_ring[1];
659 do { 668 do {
660 start = u64_stats_fetch_begin_bh(&rx_ring->syncp); 669 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
661 data[i + 2] = rx_ring->stats.packets; 670 data[i + 2] = rx_ring->stats.packets;
662 data[i + 3] = rx_ring->stats.bytes; 671 data[i + 3] = rx_ring->stats.bytes;
663 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); 672 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
664 } 673 }
665 rcu_read_unlock(); 674 rcu_read_unlock();
666 if (vsi == pf->vsi[pf->lan_vsi]) { 675 if (vsi == pf->vsi[pf->lan_vsi]) {
@@ -1112,6 +1121,84 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1112} 1121}
1113 1122
1114/** 1123/**
1124 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
1125 * @pf: Pointer to the physical function struct
1126 * @cmd: The command to get or set Rx flow classification rules
1127 * @rule_locs: Array of used rule locations
1128 *
1129 * This function populates both the total and actual rule count of
1130 * the ethtool flow classification command
1131 *
1132 * Returns 0 on success or -EMSGSIZE if entry not found
1133 **/
1134static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1135 struct ethtool_rxnfc *cmd,
1136 u32 *rule_locs)
1137{
1138 struct i40e_fdir_filter *rule;
1139 struct hlist_node *node2;
1140 int cnt = 0;
1141
1142 /* report total rule count */
1143 cmd->data = pf->hw.fdir_shared_filter_count +
1144 pf->fdir_pf_filter_count;
1145
1146 hlist_for_each_entry_safe(rule, node2,
1147 &pf->fdir_filter_list, fdir_node) {
1148 if (cnt == cmd->rule_cnt)
1149 return -EMSGSIZE;
1150
1151 rule_locs[cnt] = rule->fd_id;
1152 cnt++;
1153 }
1154
1155 cmd->rule_cnt = cnt;
1156
1157 return 0;
1158}
1159
1160/**
1161 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
1162 * @pf: Pointer to the physical function struct
1163 * @cmd: The command to get or set Rx flow classification rules
1164 *
1165 * This function looks up a filter based on the Rx flow classification
1166 * command and fills the flow spec info for it if found
1167 *
1168 * Returns 0 on success or -EINVAL if filter not found
1169 **/
1170static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1171 struct ethtool_rxnfc *cmd)
1172{
1173 struct ethtool_rx_flow_spec *fsp =
1174 (struct ethtool_rx_flow_spec *)&cmd->fs;
1175 struct i40e_fdir_filter *rule = NULL;
1176 struct hlist_node *node2;
1177
1178 /* report total rule count */
1179 cmd->data = pf->hw.fdir_shared_filter_count +
1180 pf->fdir_pf_filter_count;
1181
1182 hlist_for_each_entry_safe(rule, node2,
1183 &pf->fdir_filter_list, fdir_node) {
1184 if (fsp->location <= rule->fd_id)
1185 break;
1186 }
1187
1188 if (!rule || fsp->location != rule->fd_id)
1189 return -EINVAL;
1190
1191 fsp->flow_type = rule->flow_type;
1192 fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
1193 fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
1194 fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
1195 fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
1196 fsp->ring_cookie = rule->q_index;
1197
1198 return 0;
1199}
1200
1201/**
1115 * i40e_get_rxnfc - command to get RX flow classification rules 1202 * i40e_get_rxnfc - command to get RX flow classification rules
1116 * @netdev: network interface device structure 1203 * @netdev: network interface device structure
1117 * @cmd: ethtool rxnfc command 1204 * @cmd: ethtool rxnfc command
@@ -1135,15 +1222,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1135 ret = i40e_get_rss_hash_opts(pf, cmd); 1222 ret = i40e_get_rss_hash_opts(pf, cmd);
1136 break; 1223 break;
1137 case ETHTOOL_GRXCLSRLCNT: 1224 case ETHTOOL_GRXCLSRLCNT:
1138 cmd->rule_cnt = 10; 1225 cmd->rule_cnt = pf->fdir_pf_active_filters;
1139 ret = 0; 1226 ret = 0;
1140 break; 1227 break;
1141 case ETHTOOL_GRXCLSRULE: 1228 case ETHTOOL_GRXCLSRULE:
1142 ret = 0; 1229 ret = i40e_get_ethtool_fdir_entry(pf, cmd);
1143 break; 1230 break;
1144 case ETHTOOL_GRXCLSRLALL: 1231 case ETHTOOL_GRXCLSRLALL:
1145 cmd->data = 500; 1232 ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
1146 ret = 0; 1233 break;
1147 default: 1234 default:
1148 break; 1235 break;
1149 } 1236 }
@@ -1274,289 +1361,182 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1274 return 0; 1361 return 0;
1275} 1362}
1276 1363
1277#define IP_HEADER_OFFSET 14
1278#define I40E_UDPIP_DUMMY_PACKET_LEN 42
1279/** 1364/**
1280 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for 1365 * i40e_match_fdir_input_set - Match a new filter against an existing one
1281 * a specific flow spec 1366 * @rule: The filter already added
1282 * @vsi: pointer to the targeted VSI 1367 * @input: The new filter to comapre against
1283 * @fd_data: the flow director data required from the FDir descriptor
1284 * @ethtool_rx_flow_spec: the flow spec
1285 * @add: true adds a filter, false removes it
1286 * 1368 *
1287 * Returns 0 if the filters were successfully added or removed 1369 * Returns true if the two input set match
1288 **/ 1370 **/
1289static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, 1371static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
1290 struct i40e_fdir_data *fd_data, 1372 struct i40e_fdir_filter *input)
1291 struct ethtool_rx_flow_spec *fsp, bool add)
1292{ 1373{
1293 struct i40e_pf *pf = vsi->back; 1374 if ((rule->dst_ip[0] != input->dst_ip[0]) ||
1294 struct udphdr *udp; 1375 (rule->src_ip[0] != input->src_ip[0]) ||
1295 struct iphdr *ip; 1376 (rule->dst_port != input->dst_port) ||
1296 bool err = false; 1377 (rule->src_port != input->src_port))
1297 int ret; 1378 return false;
1298 int i; 1379 return true;
1299 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1300 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
1301 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1302 0, 0, 0, 0, 0, 0, 0, 0};
1303
1304 memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
1305
1306 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1307 udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1308 + sizeof(struct iphdr));
1309
1310 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1311 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1312 udp->source = fsp->h_u.tcp_ip4_spec.psrc;
1313 udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1314
1315 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
1316 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
1317 fd_data->pctype = i;
1318 ret = i40e_program_fdir_filter(fd_data, pf, add);
1319
1320 if (ret) {
1321 dev_info(&pf->pdev->dev,
1322 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1323 fd_data->pctype, ret);
1324 err = true;
1325 } else {
1326 dev_info(&pf->pdev->dev,
1327 "Filter OK for PCTYPE %d (ret = %d)\n",
1328 fd_data->pctype, ret);
1329 }
1330 }
1331
1332 return err ? -EOPNOTSUPP : 0;
1333} 1380}
1334 1381
1335#define I40E_TCPIP_DUMMY_PACKET_LEN 54
1336/** 1382/**
1337 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for 1383 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
1338 * a specific flow spec 1384 * @vsi: Pointer to the targeted VSI
1339 * @vsi: pointer to the targeted VSI 1385 * @input: The filter to update or NULL to indicate deletion
1340 * @fd_data: the flow director data required from the FDir descriptor 1386 * @sw_idx: Software index to the filter
1341 * @ethtool_rx_flow_spec: the flow spec 1387 * @cmd: The command to get or set Rx flow classification rules
1342 * @add: true adds a filter, false removes it
1343 * 1388 *
1344 * Returns 0 if the filters were successfully added or removed 1389 * This function updates (or deletes) a Flow Director entry from
1390 * the hlist of the corresponding PF
1391 *
1392 * Returns 0 on success
1345 **/ 1393 **/
1346static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, 1394static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
1347 struct i40e_fdir_data *fd_data, 1395 struct i40e_fdir_filter *input,
1348 struct ethtool_rx_flow_spec *fsp, bool add) 1396 u16 sw_idx,
1397 struct ethtool_rxnfc *cmd)
1349{ 1398{
1399 struct i40e_fdir_filter *rule, *parent;
1350 struct i40e_pf *pf = vsi->back; 1400 struct i40e_pf *pf = vsi->back;
1351 struct tcphdr *tcp; 1401 struct hlist_node *node2;
1352 struct iphdr *ip; 1402 int err = -EINVAL;
1353 bool err = false;
1354 int ret;
1355 /* Dummy packet */
1356 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1357 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
1358 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1359 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1360 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
1361
1362 memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
1363
1364 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1365 tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1366 + sizeof(struct iphdr));
1367
1368 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1369 tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1370 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1371 tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
1372
1373 if (add) {
1374 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
1375 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
1376 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
1377 }
1378 }
1379 1403
1380 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; 1404 parent = NULL;
1381 ret = i40e_program_fdir_filter(fd_data, pf, add); 1405 rule = NULL;
1382 1406
1383 if (ret) { 1407 hlist_for_each_entry_safe(rule, node2,
1384 dev_info(&pf->pdev->dev, 1408 &pf->fdir_filter_list, fdir_node) {
1385 "Filter command send failed for PCTYPE %d (ret = %d)\n", 1409 /* hash found, or no matching entry */
1386 fd_data->pctype, ret); 1410 if (rule->fd_id >= sw_idx)
1387 err = true; 1411 break;
1388 } else { 1412 parent = rule;
1389 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1390 fd_data->pctype, ret);
1391 } 1413 }
1392 1414
1393 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 1415 /* if there is an old rule occupying our place remove it */
1394 1416 if (rule && (rule->fd_id == sw_idx)) {
1395 ret = i40e_program_fdir_filter(fd_data, pf, add); 1417 if (input && !i40e_match_fdir_input_set(rule, input))
1396 if (ret) { 1418 err = i40e_add_del_fdir(vsi, rule, false);
1397 dev_info(&pf->pdev->dev, 1419 else if (!input)
1398 "Filter command send failed for PCTYPE %d (ret = %d)\n", 1420 err = i40e_add_del_fdir(vsi, rule, false);
1399 fd_data->pctype, ret); 1421 hlist_del(&rule->fdir_node);
1400 err = true; 1422 kfree(rule);
1401 } else { 1423 pf->fdir_pf_active_filters--;
1402 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1403 fd_data->pctype, ret);
1404 } 1424 }
1405 1425
1406 return err ? -EOPNOTSUPP : 0; 1426 /* If no input this was a delete, err should be 0 if a rule was
1407} 1427 * successfully found and removed from the list else -EINVAL
1428 */
1429 if (!input)
1430 return err;
1408 1431
1409/** 1432 /* initialize node and set software index */
1410 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for 1433 INIT_HLIST_NODE(&input->fdir_node);
1411 * a specific flow spec 1434
1412 * @vsi: pointer to the targeted VSI 1435 /* add filter to the list */
1413 * @fd_data: the flow director data required from the FDir descriptor 1436 if (parent)
1414 * @ethtool_rx_flow_spec: the flow spec 1437 hlist_add_after(&parent->fdir_node, &input->fdir_node);
1415 * @add: true adds a filter, false removes it 1438 else
1416 * 1439 hlist_add_head(&input->fdir_node,
1417 * Returns 0 if the filters were successfully added or removed 1440 &pf->fdir_filter_list);
1418 **/ 1441
1419static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, 1442 /* update counts */
1420 struct i40e_fdir_data *fd_data, 1443 pf->fdir_pf_active_filters++;
1421 struct ethtool_rx_flow_spec *fsp, bool add) 1444
1422{ 1445 return 0;
1423 return -EOPNOTSUPP;
1424} 1446}
1425 1447
1426#define I40E_IP_DUMMY_PACKET_LEN 34
1427/** 1448/**
1428 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for 1449 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
1429 * a specific flow spec 1450 * @vsi: Pointer to the targeted VSI
1430 * @vsi: pointer to the targeted VSI 1451 * @cmd: The command to get or set Rx flow classification rules
1431 * @fd_data: the flow director data required for the FDir descriptor
1432 * @fsp: the ethtool flow spec
1433 * @add: true adds a filter, false removes it
1434 * 1452 *
1435 * Returns 0 if the filters were successfully added or removed 1453 * The function removes a Flow Director filter entry from the
1436 **/ 1454 * hlist of the corresponding PF
1437static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, 1455 *
1438 struct i40e_fdir_data *fd_data, 1456 * Returns 0 on success
1439 struct ethtool_rx_flow_spec *fsp, bool add) 1457 */
1458static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
1459 struct ethtool_rxnfc *cmd)
1440{ 1460{
1461 struct ethtool_rx_flow_spec *fsp =
1462 (struct ethtool_rx_flow_spec *)&cmd->fs;
1441 struct i40e_pf *pf = vsi->back; 1463 struct i40e_pf *pf = vsi->back;
1442 struct iphdr *ip; 1464 int ret = 0;
1443 bool err = false;
1444 int ret;
1445 int i;
1446 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1447 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
1448 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1449
1450 memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
1451 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1452 1465
1453 ip->saddr = fsp->h_u.usr_ip4_spec.ip4src; 1466 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
1454 ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
1455 ip->protocol = fsp->h_u.usr_ip4_spec.proto;
1456 1467
1457 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; 1468 i40e_fdir_check_and_reenable(pf);
1458 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { 1469 return ret;
1459 fd_data->pctype = i;
1460 ret = i40e_program_fdir_filter(fd_data, pf, add);
1461
1462 if (ret) {
1463 dev_info(&pf->pdev->dev,
1464 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1465 fd_data->pctype, ret);
1466 err = true;
1467 } else {
1468 dev_info(&pf->pdev->dev,
1469 "Filter OK for PCTYPE %d (ret = %d)\n",
1470 fd_data->pctype, ret);
1471 }
1472 }
1473
1474 return err ? -EOPNOTSUPP : 0;
1475} 1470}
1476 1471
1477/** 1472/**
1478 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for 1473 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
1479 * a specific flow spec based on their protocol
1480 * @vsi: pointer to the targeted VSI 1474 * @vsi: pointer to the targeted VSI
1481 * @cmd: command to get or set RX flow classification rules 1475 * @cmd: command to get or set RX flow classification rules
1482 * @add: true adds a filter, false removes it
1483 * 1476 *
1484 * Returns 0 if the filters were successfully added or removed 1477 * Add Flow Director filters for a specific flow spec based on their
1478 * protocol. Returns 0 if the filters were successfully added.
1485 **/ 1479 **/
1486static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi, 1480static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
1487 struct ethtool_rxnfc *cmd, bool add) 1481 struct ethtool_rxnfc *cmd)
1488{ 1482{
1489 struct i40e_fdir_data fd_data; 1483 struct ethtool_rx_flow_spec *fsp;
1490 int ret = -EINVAL; 1484 struct i40e_fdir_filter *input;
1491 struct i40e_pf *pf; 1485 struct i40e_pf *pf;
1492 struct ethtool_rx_flow_spec *fsp = 1486 int ret = -EINVAL;
1493 (struct ethtool_rx_flow_spec *)&cmd->fs;
1494 1487
1495 if (!vsi) 1488 if (!vsi)
1496 return -EINVAL; 1489 return -EINVAL;
1497 1490
1498 pf = vsi->back; 1491 pf = vsi->back;
1499 1492
1500 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 1493 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
1501 (fsp->ring_cookie >= vsi->num_queue_pairs)) 1494 return -EOPNOTSUPP;
1502 return -EINVAL;
1503 1495
1504 /* Populate the Flow Director that we have at the moment 1496 if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
1505 * and allocate the raw packet buffer for the calling functions 1497 return -ENOSPC;
1506 */
1507 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1508 GFP_KERNEL);
1509 1498
1510 if (!fd_data.raw_packet) { 1499 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1511 dev_info(&pf->pdev->dev, "Could not allocate memory\n"); 1500
1512 return -ENOMEM; 1501 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
1502 pf->hw.func_caps.fd_filters_guaranteed)) {
1503 return -EINVAL;
1513 } 1504 }
1514 1505
1515 fd_data.q_index = fsp->ring_cookie; 1506 if (fsp->ring_cookie >= vsi->num_queue_pairs)
1516 fd_data.flex_off = 0; 1507 return -EINVAL;
1517 fd_data.pctype = 0;
1518 fd_data.dest_vsi = vsi->id;
1519 fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1520 fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1521 fd_data.cnt_index = 0;
1522 fd_data.fd_id = 0;
1523 1508
1524 switch (fsp->flow_type & ~FLOW_EXT) { 1509 input = kzalloc(sizeof(*input), GFP_KERNEL);
1525 case TCP_V4_FLOW: 1510
1526 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); 1511 if (!input)
1527 break; 1512 return -ENOMEM;
1528 case UDP_V4_FLOW:
1529 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1530 break;
1531 case SCTP_V4_FLOW:
1532 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1533 break;
1534 case IPV4_FLOW:
1535 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1536 break;
1537 case IP_USER_FLOW:
1538 switch (fsp->h_u.usr_ip4_spec.proto) {
1539 case IPPROTO_TCP:
1540 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1541 break;
1542 case IPPROTO_UDP:
1543 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1544 break;
1545 case IPPROTO_SCTP:
1546 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1547 break;
1548 default:
1549 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1550 break;
1551 }
1552 break;
1553 default:
1554 dev_info(&pf->pdev->dev, "Could not specify spec type\n");
1555 ret = -EINVAL;
1556 }
1557 1513
1558 kfree(fd_data.raw_packet); 1514 input->fd_id = fsp->location;
1559 fd_data.raw_packet = NULL; 1515
1516 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
1517 input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1518 else
1519 input->dest_ctl =
1520 I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1521
1522 input->q_index = fsp->ring_cookie;
1523 input->flex_off = 0;
1524 input->pctype = 0;
1525 input->dest_vsi = vsi->id;
1526 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1527 input->cnt_index = 0;
1528 input->flow_type = fsp->flow_type;
1529 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
1530 input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
1531 input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1532 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
1533 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
1534
1535 ret = i40e_add_del_fdir(vsi, input, true);
1536 if (ret)
1537 kfree(input);
1538 else
1539 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
1560 1540
1561 return ret; 1541 return ret;
1562} 1542}
@@ -1580,10 +1560,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1580 ret = i40e_set_rss_hash_opt(pf, cmd); 1560 ret = i40e_set_rss_hash_opt(pf, cmd);
1581 break; 1561 break;
1582 case ETHTOOL_SRXCLSRLINS: 1562 case ETHTOOL_SRXCLSRLINS:
1583 ret = i40e_add_del_fdir_ethtool(vsi, cmd, true); 1563 ret = i40e_add_fdir_ethtool(vsi, cmd);
1584 break; 1564 break;
1585 case ETHTOOL_SRXCLSRLDEL: 1565 case ETHTOOL_SRXCLSRLDEL:
1586 ret = i40e_add_del_fdir_ethtool(vsi, cmd, false); 1566 ret = i40e_del_fdir_entry(vsi, cmd);
1587 break; 1567 break;
1588 default: 1568 default:
1589 break; 1569 break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b901371ca361..861b722c2672 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,6 +26,7 @@
26 26
27/* Local includes */ 27/* Local includes */
28#include "i40e.h" 28#include "i40e.h"
29#include "i40e_diag.h"
29#ifdef CONFIG_I40E_VXLAN 30#ifdef CONFIG_I40E_VXLAN
30#include <net/vxlan.h> 31#include <net/vxlan.h>
31#endif 32#endif
@@ -38,7 +39,7 @@ static const char i40e_driver_string[] =
38 39
39#define DRV_VERSION_MAJOR 0 40#define DRV_VERSION_MAJOR 0
40#define DRV_VERSION_MINOR 3 41#define DRV_VERSION_MINOR 3
41#define DRV_VERSION_BUILD 30 42#define DRV_VERSION_BUILD 36
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -305,6 +306,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
305 break; 306 break;
306 default: 307 default:
307 netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); 308 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
309 set_bit(__I40E_DOWN, &vsi->state);
308 i40e_down(vsi); 310 i40e_down(vsi);
309 break; 311 break;
310 } 312 }
@@ -375,20 +377,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
375 continue; 377 continue;
376 378
377 do { 379 do {
378 start = u64_stats_fetch_begin_bh(&tx_ring->syncp); 380 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
379 packets = tx_ring->stats.packets; 381 packets = tx_ring->stats.packets;
380 bytes = tx_ring->stats.bytes; 382 bytes = tx_ring->stats.bytes;
381 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); 383 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
382 384
383 stats->tx_packets += packets; 385 stats->tx_packets += packets;
384 stats->tx_bytes += bytes; 386 stats->tx_bytes += bytes;
385 rx_ring = &tx_ring[1]; 387 rx_ring = &tx_ring[1];
386 388
387 do { 389 do {
388 start = u64_stats_fetch_begin_bh(&rx_ring->syncp); 390 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
389 packets = rx_ring->stats.packets; 391 packets = rx_ring->stats.packets;
390 bytes = rx_ring->stats.bytes; 392 bytes = rx_ring->stats.bytes;
391 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); 393 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
392 394
393 stats->rx_packets += packets; 395 stats->rx_packets += packets;
394 stats->rx_bytes += bytes; 396 stats->rx_bytes += bytes;
@@ -739,6 +741,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
739 u32 rx_page, rx_buf; 741 u32 rx_page, rx_buf;
740 u64 rx_p, rx_b; 742 u64 rx_p, rx_b;
741 u64 tx_p, tx_b; 743 u64 tx_p, tx_b;
744 u32 val;
742 int i; 745 int i;
743 u16 q; 746 u16 q;
744 747
@@ -769,10 +772,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
769 p = ACCESS_ONCE(vsi->tx_rings[q]); 772 p = ACCESS_ONCE(vsi->tx_rings[q]);
770 773
771 do { 774 do {
772 start = u64_stats_fetch_begin_bh(&p->syncp); 775 start = u64_stats_fetch_begin_irq(&p->syncp);
773 packets = p->stats.packets; 776 packets = p->stats.packets;
774 bytes = p->stats.bytes; 777 bytes = p->stats.bytes;
775 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 778 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
776 tx_b += bytes; 779 tx_b += bytes;
777 tx_p += packets; 780 tx_p += packets;
778 tx_restart += p->tx_stats.restart_queue; 781 tx_restart += p->tx_stats.restart_queue;
@@ -781,10 +784,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
781 /* Rx queue is part of the same block as Tx queue */ 784 /* Rx queue is part of the same block as Tx queue */
782 p = &p[1]; 785 p = &p[1];
783 do { 786 do {
784 start = u64_stats_fetch_begin_bh(&p->syncp); 787 start = u64_stats_fetch_begin_irq(&p->syncp);
785 packets = p->stats.packets; 788 packets = p->stats.packets;
786 bytes = p->stats.bytes; 789 bytes = p->stats.bytes;
787 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 790 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
788 rx_b += bytes; 791 rx_b += bytes;
789 rx_p += packets; 792 rx_p += packets;
790 rx_buf += p->rx_stats.alloc_buff_failed; 793 rx_buf += p->rx_stats.alloc_buff_failed;
@@ -971,6 +974,20 @@ void i40e_update_stats(struct i40e_vsi *vsi)
971 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 974 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
972 pf->stat_offsets_loaded, 975 pf->stat_offsets_loaded,
973 &osd->rx_jabber, &nsd->rx_jabber); 976 &osd->rx_jabber, &nsd->rx_jabber);
977
978 val = rd32(hw, I40E_PRTPM_EEE_STAT);
979 nsd->tx_lpi_status =
980 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
981 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
982 nsd->rx_lpi_status =
983 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
984 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
985 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
986 pf->stat_offsets_loaded,
987 &osd->tx_lpi_count, &nsd->tx_lpi_count);
988 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
989 pf->stat_offsets_loaded,
990 &osd->rx_lpi_count, &nsd->rx_lpi_count);
974 } 991 }
975 992
976 pf->stat_offsets_loaded = true; 993 pf->stat_offsets_loaded = true;
@@ -1964,11 +1981,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1964 1981
1965 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 1982 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1966 1983
1967 /* If the network stack called us with vid = 0, we should 1984 /* If the network stack called us with vid = 0 then
1968 * indicate to i40e_vsi_add_vlan() that we want to receive 1985 * it is asking to receive priority tagged packets with
1969 * any traffic (i.e. with any vlan tag, or untagged) 1986 * vlan id 0. Our HW receives them by default when configured
1987 * to receive untagged packets so there is no need to add an
1988 * extra filter for vlan 0 tagged packets.
1970 */ 1989 */
1971 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1990 if (vid)
1991 ret = i40e_vsi_add_vlan(vsi, vid);
1972 1992
1973 if (!ret && (vid < VLAN_N_VID)) 1993 if (!ret && (vid < VLAN_N_VID))
1974 set_bit(vid, vsi->active_vlans); 1994 set_bit(vid, vsi->active_vlans);
@@ -1981,7 +2001,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1981 * @netdev: network interface to be adjusted 2001 * @netdev: network interface to be adjusted
1982 * @vid: vlan id to be removed 2002 * @vid: vlan id to be removed
1983 * 2003 *
1984 * net_device_ops implementation for adding vlan ids 2004 * net_device_ops implementation for removing vlan ids
1985 **/ 2005 **/
1986static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2006static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1987 __always_unused __be16 proto, u16 vid) 2007 __always_unused __be16 proto, u16 vid)
@@ -2177,6 +2197,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
2177 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2197 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2178 I40E_FLAG_FD_ATR_ENABLED)); 2198 I40E_FLAG_FD_ATR_ENABLED));
2179 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 2199 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2200 /* FDIR VSI tx ring can still use RS bit and writebacks */
2201 if (vsi->type != I40E_VSI_FDIR)
2202 tx_ctx.head_wb_ena = 1;
2203 tx_ctx.head_wb_addr = ring->dma +
2204 (ring->count * sizeof(struct i40e_tx_desc));
2180 2205
2181 /* As part of VSI creation/update, FW allocates certain 2206 /* As part of VSI creation/update, FW allocates certain
2182 * Tx arbitration queue sets for each TC enabled for 2207 * Tx arbitration queue sets for each TC enabled for
@@ -2420,6 +2445,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2420} 2445}
2421 2446
2422/** 2447/**
2448 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2449 * @vsi: Pointer to the targeted VSI
2450 *
2451 * This function replays the hlist on the hw where all the SB Flow Director
2452 * filters were saved.
2453 **/
2454static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2455{
2456 struct i40e_fdir_filter *filter;
2457 struct i40e_pf *pf = vsi->back;
2458 struct hlist_node *node;
2459
2460 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2461 return;
2462
2463 hlist_for_each_entry_safe(filter, node,
2464 &pf->fdir_filter_list, fdir_node) {
2465 i40e_add_del_fdir(vsi, filter, true);
2466 }
2467}
2468
2469/**
2423 * i40e_vsi_configure - Set up the VSI for action 2470 * i40e_vsi_configure - Set up the VSI for action
2424 * @vsi: the VSI being configured 2471 * @vsi: the VSI being configured
2425 **/ 2472 **/
@@ -2557,7 +2604,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2557 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2604 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2558 wr32(hw, I40E_PFINT_LNKLST0, 0); 2605 wr32(hw, I40E_PFINT_LNKLST0, 0);
2559 2606
2560 /* Associate the queue pair to the vector and enable the q int */ 2607 /* Associate the queue pair to the vector and enable the queue int */
2561 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2608 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2562 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2609 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2563 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2610 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2831,12 +2878,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
2831 val = rd32(hw, I40E_GLGEN_RSTAT); 2878 val = rd32(hw, I40E_GLGEN_RSTAT);
2832 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 2879 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2833 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 2880 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2834 if (val == I40E_RESET_CORER) 2881 if (val == I40E_RESET_CORER) {
2835 pf->corer_count++; 2882 pf->corer_count++;
2836 else if (val == I40E_RESET_GLOBR) 2883 } else if (val == I40E_RESET_GLOBR) {
2837 pf->globr_count++; 2884 pf->globr_count++;
2838 else if (val == I40E_RESET_EMPR) 2885 } else if (val == I40E_RESET_EMPR) {
2839 pf->empr_count++; 2886 pf->empr_count++;
2887 set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
2888 }
2840 } 2889 }
2841 2890
2842 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 2891 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
@@ -2866,8 +2915,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
2866 icr0_remaining); 2915 icr0_remaining);
2867 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 2916 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2868 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 2917 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2869 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || 2918 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
2870 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2871 dev_info(&pf->pdev->dev, "device will be reset\n"); 2919 dev_info(&pf->pdev->dev, "device will be reset\n");
2872 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2920 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2873 i40e_service_event_schedule(pf); 2921 i40e_service_event_schedule(pf);
@@ -3107,13 +3155,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3107 3155
3108 pf_q = vsi->base_queue; 3156 pf_q = vsi->base_queue;
3109 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3157 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3110 j = 1000; 3158 for (j = 0; j < 50; j++) {
3111 do {
3112 usleep_range(1000, 2000);
3113 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3159 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3114 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) 3160 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3115 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); 3161 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3116 3162 break;
3163 usleep_range(1000, 2000);
3164 }
3117 /* Skip if the queue is already in the requested state */ 3165 /* Skip if the queue is already in the requested state */
3118 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3166 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3119 continue; 3167 continue;
@@ -3123,8 +3171,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3123 /* turn on/off the queue */ 3171 /* turn on/off the queue */
3124 if (enable) { 3172 if (enable) {
3125 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3173 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3126 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | 3174 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3127 I40E_QTX_ENA_QENA_STAT_MASK;
3128 } else { 3175 } else {
3129 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3176 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3130 } 3177 }
@@ -3171,12 +3218,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3171 3218
3172 pf_q = vsi->base_queue; 3219 pf_q = vsi->base_queue;
3173 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3220 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3174 j = 1000; 3221 for (j = 0; j < 50; j++) {
3175 do {
3176 usleep_range(1000, 2000);
3177 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3222 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3178 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) 3223 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3179 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); 3224 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3225 break;
3226 usleep_range(1000, 2000);
3227 }
3180 3228
3181 if (enable) { 3229 if (enable) {
3182 /* is STAT set ? */ 3230 /* is STAT set ? */
@@ -3190,11 +3238,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3190 3238
3191 /* turn on/off the queue */ 3239 /* turn on/off the queue */
3192 if (enable) 3240 if (enable)
3193 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | 3241 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3194 I40E_QRX_ENA_QENA_STAT_MASK;
3195 else 3242 else
3196 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | 3243 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3197 I40E_QRX_ENA_QENA_STAT_MASK);
3198 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 3244 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3199 3245
3200 /* wait for the change to finish */ 3246 /* wait for the change to finish */
@@ -3732,8 +3778,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3732 NULL); 3778 NULL);
3733 if (aq_ret) { 3779 if (aq_ret) {
3734 dev_info(&vsi->back->pdev->dev, 3780 dev_info(&vsi->back->pdev->dev,
3735 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3781 "AQ command Config VSI BW allocation per TC failed = %d\n",
3736 __func__, vsi->back->hw.aq.asq_last_status); 3782 vsi->back->hw.aq.asq_last_status);
3737 return -EINVAL; 3783 return -EINVAL;
3738 } 3784 }
3739 3785
@@ -4062,6 +4108,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
4062 } else if (vsi->netdev) { 4108 } else if (vsi->netdev) {
4063 netdev_info(vsi->netdev, "NIC Link is Down\n"); 4109 netdev_info(vsi->netdev, "NIC Link is Down\n");
4064 } 4110 }
4111
4112 /* replay FDIR SB filters */
4113 if (vsi->type == I40E_VSI_FDIR)
4114 i40e_fdir_filter_restore(vsi);
4065 i40e_service_event_schedule(pf); 4115 i40e_service_event_schedule(pf);
4066 4116
4067 return 0; 4117 return 0;
@@ -4208,15 +4258,40 @@ static int i40e_open(struct net_device *netdev)
4208 struct i40e_netdev_priv *np = netdev_priv(netdev); 4258 struct i40e_netdev_priv *np = netdev_priv(netdev);
4209 struct i40e_vsi *vsi = np->vsi; 4259 struct i40e_vsi *vsi = np->vsi;
4210 struct i40e_pf *pf = vsi->back; 4260 struct i40e_pf *pf = vsi->back;
4211 char int_name[IFNAMSIZ];
4212 int err; 4261 int err;
4213 4262
4214 /* disallow open during test */ 4263 /* disallow open during test or if eeprom is broken */
4215 if (test_bit(__I40E_TESTING, &pf->state)) 4264 if (test_bit(__I40E_TESTING, &pf->state) ||
4265 test_bit(__I40E_BAD_EEPROM, &pf->state))
4216 return -EBUSY; 4266 return -EBUSY;
4217 4267
4218 netif_carrier_off(netdev); 4268 netif_carrier_off(netdev);
4219 4269
4270 err = i40e_vsi_open(vsi);
4271 if (err)
4272 return err;
4273
4274#ifdef CONFIG_I40E_VXLAN
4275 vxlan_get_rx_port(netdev);
4276#endif
4277
4278 return 0;
4279}
4280
4281/**
4282 * i40e_vsi_open -
4283 * @vsi: the VSI to open
4284 *
4285 * Finish initialization of the VSI.
4286 *
4287 * Returns 0 on success, negative value on failure
4288 **/
4289int i40e_vsi_open(struct i40e_vsi *vsi)
4290{
4291 struct i40e_pf *pf = vsi->back;
4292 char int_name[IFNAMSIZ];
4293 int err;
4294
4220 /* allocate descriptors */ 4295 /* allocate descriptors */
4221 err = i40e_vsi_setup_tx_resources(vsi); 4296 err = i40e_vsi_setup_tx_resources(vsi);
4222 if (err) 4297 if (err)
@@ -4229,18 +4304,22 @@ static int i40e_open(struct net_device *netdev)
4229 if (err) 4304 if (err)
4230 goto err_setup_rx; 4305 goto err_setup_rx;
4231 4306
4307 if (!vsi->netdev) {
4308 err = EINVAL;
4309 goto err_setup_rx;
4310 }
4232 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 4311 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4233 dev_driver_string(&pf->pdev->dev), netdev->name); 4312 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4234 err = i40e_vsi_request_irq(vsi, int_name); 4313 err = i40e_vsi_request_irq(vsi, int_name);
4235 if (err) 4314 if (err)
4236 goto err_setup_rx; 4315 goto err_setup_rx;
4237 4316
4238 /* Notify the stack of the actual queue counts. */ 4317 /* Notify the stack of the actual queue counts. */
4239 err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs); 4318 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
4240 if (err) 4319 if (err)
4241 goto err_set_queues; 4320 goto err_set_queues;
4242 4321
4243 err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs); 4322 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
4244 if (err) 4323 if (err)
4245 goto err_set_queues; 4324 goto err_set_queues;
4246 4325
@@ -4248,10 +4327,6 @@ static int i40e_open(struct net_device *netdev)
4248 if (err) 4327 if (err)
4249 goto err_up_complete; 4328 goto err_up_complete;
4250 4329
4251#ifdef CONFIG_I40E_VXLAN
4252 vxlan_get_rx_port(netdev);
4253#endif
4254
4255 return 0; 4330 return 0;
4256 4331
4257err_up_complete: 4332err_up_complete:
@@ -4269,6 +4344,26 @@ err_setup_tx:
4269} 4344}
4270 4345
4271/** 4346/**
4347 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4348 * @pf: Pointer to pf
4349 *
4350 * This function destroys the hlist where all the Flow Director
4351 * filters were saved.
4352 **/
4353static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4354{
4355 struct i40e_fdir_filter *filter;
4356 struct hlist_node *node2;
4357
4358 hlist_for_each_entry_safe(filter, node2,
4359 &pf->fdir_filter_list, fdir_node) {
4360 hlist_del(&filter->fdir_node);
4361 kfree(filter);
4362 }
4363 pf->fdir_pf_active_filters = 0;
4364}
4365
4366/**
4272 * i40e_close - Disables a network interface 4367 * i40e_close - Disables a network interface
4273 * @netdev: network interface device structure 4368 * @netdev: network interface device structure
4274 * 4369 *
@@ -4321,7 +4416,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4321 * for the warning interrupt will deal with the shutdown 4416 * for the warning interrupt will deal with the shutdown
4322 * and recovery of the switch setup. 4417 * and recovery of the switch setup.
4323 */ 4418 */
4324 dev_info(&pf->pdev->dev, "GlobalR requested\n"); 4419 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
4325 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4420 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4326 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 4421 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4327 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4422 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4332,7 +4427,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4332 * 4427 *
4333 * Same as Global Reset, except does *not* include the MAC/PHY 4428 * Same as Global Reset, except does *not* include the MAC/PHY
4334 */ 4429 */
4335 dev_info(&pf->pdev->dev, "CoreR requested\n"); 4430 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
4336 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4431 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4337 val |= I40E_GLGEN_RTRIG_CORER_MASK; 4432 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4338 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4433 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4366,7 +4461,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4366 * the switch, since we need to do all the recovery as 4461 * the switch, since we need to do all the recovery as
4367 * for the Core Reset. 4462 * for the Core Reset.
4368 */ 4463 */
4369 dev_info(&pf->pdev->dev, "PFR requested\n"); 4464 dev_dbg(&pf->pdev->dev, "PFR requested\n");
4370 i40e_handle_reset_warning(pf); 4465 i40e_handle_reset_warning(pf);
4371 4466
4372 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { 4467 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -4415,18 +4510,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4415 &old_cfg->etscfg.prioritytable, 4510 &old_cfg->etscfg.prioritytable,
4416 sizeof(new_cfg->etscfg.prioritytable))) { 4511 sizeof(new_cfg->etscfg.prioritytable))) {
4417 need_reconfig = true; 4512 need_reconfig = true;
4418 dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n"); 4513 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4419 } 4514 }
4420 4515
4421 if (memcmp(&new_cfg->etscfg.tcbwtable, 4516 if (memcmp(&new_cfg->etscfg.tcbwtable,
4422 &old_cfg->etscfg.tcbwtable, 4517 &old_cfg->etscfg.tcbwtable,
4423 sizeof(new_cfg->etscfg.tcbwtable))) 4518 sizeof(new_cfg->etscfg.tcbwtable)))
4424 dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 4519 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4425 4520
4426 if (memcmp(&new_cfg->etscfg.tsatable, 4521 if (memcmp(&new_cfg->etscfg.tsatable,
4427 &old_cfg->etscfg.tsatable, 4522 &old_cfg->etscfg.tsatable,
4428 sizeof(new_cfg->etscfg.tsatable))) 4523 sizeof(new_cfg->etscfg.tsatable)))
4429 dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n"); 4524 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4430 } 4525 }
4431 4526
4432 /* Check if PFC configuration has changed */ 4527 /* Check if PFC configuration has changed */
@@ -4434,7 +4529,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4434 &old_cfg->pfc, 4529 &old_cfg->pfc,
4435 sizeof(new_cfg->pfc))) { 4530 sizeof(new_cfg->pfc))) {
4436 need_reconfig = true; 4531 need_reconfig = true;
4437 dev_info(&pf->pdev->dev, "PFC config change detected.\n"); 4532 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4438 } 4533 }
4439 4534
4440 /* Check if APP Table has changed */ 4535 /* Check if APP Table has changed */
@@ -4442,7 +4537,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4442 &old_cfg->app, 4537 &old_cfg->app,
4443 sizeof(new_cfg->app))) { 4538 sizeof(new_cfg->app))) {
4444 need_reconfig = true; 4539 need_reconfig = true;
4445 dev_info(&pf->pdev->dev, "APP Table change detected.\n"); 4540 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
4446 } 4541 }
4447 4542
4448 return need_reconfig; 4543 return need_reconfig;
@@ -4492,7 +4587,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
4492 4587
4493 /* No change detected in DCBX configs */ 4588 /* No change detected in DCBX configs */
4494 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { 4589 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
4495 dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 4590 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4496 goto exit; 4591 goto exit;
4497 } 4592 }
4498 4593
@@ -4550,8 +4645,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4550 struct i40e_vf *vf; 4645 struct i40e_vf *vf;
4551 u16 vf_id; 4646 u16 vf_id;
4552 4647
4553 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", 4648 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
4554 __func__, queue, qtx_ctl); 4649 queue, qtx_ctl);
4555 4650
4556 /* Queue belongs to VF, find the VF and issue VF reset */ 4651 /* Queue belongs to VF, find the VF and issue VF reset */
4557 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) 4652 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4581,6 +4676,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
4581} 4676}
4582 4677
4583/** 4678/**
4679 * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
4680 * @pf: board private structure
4681 **/
4682int i40e_get_current_fd_count(struct i40e_pf *pf)
4683{
4684 int val, fcnt_prog;
4685 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
4686 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
4687 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
4688 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
4689 return fcnt_prog;
4690}
4691
4692/**
4693 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
4694 * @pf: board private structure
4695 **/
4696void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
4697{
4698 u32 fcnt_prog, fcnt_avail;
4699
4700 /* Check if, FD SB or ATR was auto disabled and if there is enough room
4701 * to re-enable
4702 */
4703 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4704 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4705 return;
4706 fcnt_prog = i40e_get_current_fd_count(pf);
4707 fcnt_avail = pf->hw.fdir_shared_filter_count +
4708 pf->fdir_pf_filter_count;
4709 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
4710 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
4711 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
4712 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
4713 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
4714 }
4715 }
4716 /* Wait for some more space to be available to turn on ATR */
4717 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
4718 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4719 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
4720 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4721 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
4722 }
4723 }
4724}
4725
4726/**
4584 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 4727 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4585 * @pf: board private structure 4728 * @pf: board private structure
4586 **/ 4729 **/
@@ -4589,11 +4732,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4589 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) 4732 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4590 return; 4733 return;
4591 4734
4592 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4593
4594 /* if interface is down do nothing */ 4735 /* if interface is down do nothing */
4595 if (test_bit(__I40E_DOWN, &pf->state)) 4736 if (test_bit(__I40E_DOWN, &pf->state))
4596 return; 4737 return;
4738 i40e_fdir_check_and_reenable(pf);
4739
4740 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4741 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4742 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4597} 4743}
4598 4744
4599/** 4745/**
@@ -4903,7 +5049,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4903 event.msg_size); 5049 event.msg_size);
4904 break; 5050 break;
4905 case i40e_aqc_opc_lldp_update_mib: 5051 case i40e_aqc_opc_lldp_update_mib:
4906 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 5052 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4907#ifdef CONFIG_I40E_DCB 5053#ifdef CONFIG_I40E_DCB
4908 rtnl_lock(); 5054 rtnl_lock();
4909 ret = i40e_handle_lldp_event(pf, &event); 5055 ret = i40e_handle_lldp_event(pf, &event);
@@ -4911,7 +5057,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4911#endif /* CONFIG_I40E_DCB */ 5057#endif /* CONFIG_I40E_DCB */
4912 break; 5058 break;
4913 case i40e_aqc_opc_event_lan_overflow: 5059 case i40e_aqc_opc_event_lan_overflow:
4914 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 5060 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4915 i40e_handle_lan_overflow_event(pf, &event); 5061 i40e_handle_lan_overflow_event(pf, &event);
4916 break; 5062 break;
4917 case i40e_aqc_opc_send_msg_to_peer: 5063 case i40e_aqc_opc_send_msg_to_peer:
@@ -4936,6 +5082,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4936} 5082}
4937 5083
4938/** 5084/**
5085 * i40e_verify_eeprom - make sure eeprom is good to use
5086 * @pf: board private structure
5087 **/
5088static void i40e_verify_eeprom(struct i40e_pf *pf)
5089{
5090 int err;
5091
5092 err = i40e_diag_eeprom_test(&pf->hw);
5093 if (err) {
5094 /* retry in case of garbage read */
5095 err = i40e_diag_eeprom_test(&pf->hw);
5096 if (err) {
5097 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5098 err);
5099 set_bit(__I40E_BAD_EEPROM, &pf->state);
5100 }
5101 }
5102
5103 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5104 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5105 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5106 }
5107}
5108
5109/**
4939 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 5110 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
4940 * @veb: pointer to the VEB instance 5111 * @veb: pointer to the VEB instance
4941 * 5112 *
@@ -5053,6 +5224,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
5053 /* increment MSI-X count because current FW skips one */ 5224 /* increment MSI-X count because current FW skips one */
5054 pf->hw.func_caps.num_msix_vectors++; 5225 pf->hw.func_caps.num_msix_vectors++;
5055 5226
5227 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
5228 (pf->hw.aq.fw_maj_ver < 2)) {
5229 pf->hw.func_caps.num_msix_vectors++;
5230 pf->hw.func_caps.num_msix_vectors_vf++;
5231 }
5232
5056 if (pf->hw.debug_mask & I40E_DEBUG_USER) 5233 if (pf->hw.debug_mask & I40E_DEBUG_USER)
5057 dev_info(&pf->pdev->dev, 5234 dev_info(&pf->pdev->dev,
5058 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 5235 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -5132,9 +5309,9 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5132 err = i40e_up_complete(vsi); 5309 err = i40e_up_complete(vsi);
5133 if (err) 5310 if (err)
5134 goto err_up_complete; 5311 goto err_up_complete;
5312 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
5135 } 5313 }
5136 5314
5137 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
5138 return; 5315 return;
5139 5316
5140err_up_complete: 5317err_up_complete:
@@ -5157,6 +5334,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
5157{ 5334{
5158 int i; 5335 int i;
5159 5336
5337 i40e_fdir_filter_exit(pf);
5160 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5338 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5161 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5339 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5162 i40e_vsi_release(pf->vsi[i]); 5340 i40e_vsi_release(pf->vsi[i]);
@@ -5181,7 +5359,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
5181 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 5359 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
5182 return 0; 5360 return 0;
5183 5361
5184 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 5362 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
5185 5363
5186 if (i40e_check_asq_alive(hw)) 5364 if (i40e_check_asq_alive(hw))
5187 i40e_vc_notify_reset(pf); 5365 i40e_vc_notify_reset(pf);
@@ -5228,7 +5406,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5228 5406
5229 if (test_bit(__I40E_DOWN, &pf->state)) 5407 if (test_bit(__I40E_DOWN, &pf->state))
5230 goto end_core_reset; 5408 goto end_core_reset;
5231 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); 5409 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
5232 5410
5233 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ 5411 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5234 ret = i40e_init_adminq(&pf->hw); 5412 ret = i40e_init_adminq(&pf->hw);
@@ -5237,6 +5415,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5237 goto end_core_reset; 5415 goto end_core_reset;
5238 } 5416 }
5239 5417
5418 /* re-verify the eeprom if we just had an EMP reset */
5419 if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
5420 clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
5421 i40e_verify_eeprom(pf);
5422 }
5423
5240 ret = i40e_get_capabilities(pf); 5424 ret = i40e_get_capabilities(pf);
5241 if (ret) { 5425 if (ret) {
5242 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", 5426 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5278,7 +5462,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5278 * try to recover minimal use by getting the basic PF VSI working. 5462 * try to recover minimal use by getting the basic PF VSI working.
5279 */ 5463 */
5280 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 5464 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
5281 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); 5465 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
5282 /* find the one VEB connected to the MAC, and find orphans */ 5466 /* find the one VEB connected to the MAC, and find orphans */
5283 for (v = 0; v < I40E_MAX_VEB; v++) { 5467 for (v = 0; v < I40E_MAX_VEB; v++) {
5284 if (!pf->veb[v]) 5468 if (!pf->veb[v])
@@ -5331,6 +5515,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5331 /* restart the VSIs that were rebuilt and running before the reset */ 5515 /* restart the VSIs that were rebuilt and running before the reset */
5332 i40e_pf_unquiesce_all_vsi(pf); 5516 i40e_pf_unquiesce_all_vsi(pf);
5333 5517
5518 if (pf->num_alloc_vfs) {
5519 for (v = 0; v < pf->num_alloc_vfs; v++)
5520 i40e_reset_vf(&pf->vf[v], true);
5521 }
5522
5334 /* tell the firmware that we're starting */ 5523 /* tell the firmware that we're starting */
5335 dv.major_version = DRV_VERSION_MAJOR; 5524 dv.major_version = DRV_VERSION_MAJOR;
5336 dv.minor_version = DRV_VERSION_MINOR; 5525 dv.minor_version = DRV_VERSION_MINOR;
@@ -5338,7 +5527,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5338 dv.subbuild_version = 0; 5527 dv.subbuild_version = 0;
5339 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 5528 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5340 5529
5341 dev_info(&pf->pdev->dev, "PF reset done\n"); 5530 dev_info(&pf->pdev->dev, "reset complete\n");
5342 5531
5343end_core_reset: 5532end_core_reset:
5344 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 5533 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5387,7 +5576,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
5387 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) 5576 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
5388 >> I40E_GL_MDET_TX_QUEUE_SHIFT; 5577 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
5389 dev_info(&pf->pdev->dev, 5578 dev_info(&pf->pdev->dev,
5390 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", 5579 "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
5391 event, queue, func); 5580 event, queue, func);
5392 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 5581 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5393 mdd_detected = true; 5582 mdd_detected = true;
@@ -5401,7 +5590,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
5401 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) 5590 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
5402 >> I40E_GL_MDET_RX_QUEUE_SHIFT; 5591 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
5403 dev_info(&pf->pdev->dev, 5592 dev_info(&pf->pdev->dev,
5404 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", 5593 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
5405 event, queue, func); 5594 event, queue, func);
5406 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 5595 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5407 mdd_detected = true; 5596 mdd_detected = true;
@@ -5850,37 +6039,16 @@ err_out:
5850 **/ 6039 **/
5851static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 6040static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5852{ 6041{
5853 int err = 0; 6042 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
5854 6043 I40E_MIN_MSIX, vectors);
5855 pf->num_msix_entries = 0; 6044 if (vectors < 0) {
5856 while (vectors >= I40E_MIN_MSIX) {
5857 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5858 if (err == 0) {
5859 /* good to go */
5860 pf->num_msix_entries = vectors;
5861 break;
5862 } else if (err < 0) {
5863 /* total failure */
5864 dev_info(&pf->pdev->dev,
5865 "MSI-X vector reservation failed: %d\n", err);
5866 vectors = 0;
5867 break;
5868 } else {
5869 /* err > 0 is the hint for retry */
5870 dev_info(&pf->pdev->dev,
5871 "MSI-X vectors wanted %d, retrying with %d\n",
5872 vectors, err);
5873 vectors = err;
5874 }
5875 }
5876
5877 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5878 dev_info(&pf->pdev->dev, 6045 dev_info(&pf->pdev->dev,
5879 "Couldn't get enough vectors, only %d available\n", 6046 "MSI-X vector reservation failed: %d\n", vectors);
5880 vectors);
5881 vectors = 0; 6047 vectors = 0;
5882 } 6048 }
5883 6049
6050 pf->num_msix_entries = vectors;
6051
5884 return vectors; 6052 return vectors;
5885} 6053}
5886 6054
@@ -5942,7 +6110,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
5942 6110
5943 } else if (vec == I40E_MIN_MSIX) { 6111 } else if (vec == I40E_MIN_MSIX) {
5944 /* Adjust for minimal MSIX use */ 6112 /* Adjust for minimal MSIX use */
5945 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); 6113 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
5946 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 6114 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5947 pf->num_vmdq_vsis = 0; 6115 pf->num_vmdq_vsis = 0;
5948 pf->num_vmdq_qps = 0; 6116 pf->num_vmdq_qps = 0;
@@ -5978,13 +6146,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
5978} 6146}
5979 6147
5980/** 6148/**
5981 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector 6149 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
5982 * @vsi: the VSI being configured 6150 * @vsi: the VSI being configured
5983 * @v_idx: index of the vector in the vsi struct 6151 * @v_idx: index of the vector in the vsi struct
5984 * 6152 *
5985 * We allocate one q_vector. If allocation fails we return -ENOMEM. 6153 * We allocate one q_vector. If allocation fails we return -ENOMEM.
5986 **/ 6154 **/
5987static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 6155static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5988{ 6156{
5989 struct i40e_q_vector *q_vector; 6157 struct i40e_q_vector *q_vector;
5990 6158
@@ -6010,13 +6178,13 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
6010} 6178}
6011 6179
6012/** 6180/**
6013 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors 6181 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
6014 * @vsi: the VSI being configured 6182 * @vsi: the VSI being configured
6015 * 6183 *
6016 * We allocate one q_vector per queue interrupt. If allocation fails we 6184 * We allocate one q_vector per queue interrupt. If allocation fails we
6017 * return -ENOMEM. 6185 * return -ENOMEM.
6018 **/ 6186 **/
6019static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) 6187static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
6020{ 6188{
6021 struct i40e_pf *pf = vsi->back; 6189 struct i40e_pf *pf = vsi->back;
6022 int v_idx, num_q_vectors; 6190 int v_idx, num_q_vectors;
@@ -6031,7 +6199,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
6031 return -EINVAL; 6199 return -EINVAL;
6032 6200
6033 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 6201 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
6034 err = i40e_alloc_q_vector(vsi, v_idx); 6202 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
6035 if (err) 6203 if (err)
6036 goto err_out; 6204 goto err_out;
6037 } 6205 }
@@ -6071,7 +6239,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6071 6239
6072 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 6240 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6073 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 6241 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
6074 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); 6242 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
6075 err = pci_enable_msi(pf->pdev); 6243 err = pci_enable_msi(pf->pdev);
6076 if (err) { 6244 if (err) {
6077 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); 6245 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6248,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6080 } 6248 }
6081 6249
6082 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 6250 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
6083 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); 6251 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
6084 6252
6085 /* track first vector for misc interrupts */ 6253 /* track first vector for misc interrupts */
6086 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 6254 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6275,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
6107 i40e_intr, 0, pf->misc_int_name, pf); 6275 i40e_intr, 0, pf->misc_int_name, pf);
6108 if (err) { 6276 if (err) {
6109 dev_info(&pf->pdev->dev, 6277 dev_info(&pf->pdev->dev,
6110 "request_irq for msix_misc failed: %d\n", err); 6278 "request_irq for %s failed: %d\n",
6279 pf->misc_int_name, err);
6111 return -EFAULT; 6280 return -EFAULT;
6112 } 6281 }
6113 } 6282 }
@@ -6258,15 +6427,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
6258 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 6427 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
6259 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6428 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6260 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 6429 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
6261 dev_info(&pf->pdev->dev,
6262 "Flow Director ATR mode Enabled\n");
6263 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 6430 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
6264 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6431 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6265 dev_info(&pf->pdev->dev,
6266 "Flow Director Side Band mode Enabled\n");
6267 } else { 6432 } else {
6268 dev_info(&pf->pdev->dev, 6433 dev_info(&pf->pdev->dev,
6269 "Flow Director Side Band mode Disabled in MFP mode\n"); 6434 "Flow Director Sideband mode Disabled in MFP mode\n");
6270 } 6435 }
6271 pf->fdir_pf_filter_count = 6436 pf->fdir_pf_filter_count =
6272 pf->hw.func_caps.fd_filters_guaranteed; 6437 pf->hw.func_caps.fd_filters_guaranteed;
@@ -6287,9 +6452,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
6287 pf->num_req_vfs = min_t(int, 6452 pf->num_req_vfs = min_t(int,
6288 pf->hw.func_caps.num_vfs, 6453 pf->hw.func_caps.num_vfs,
6289 I40E_MAX_VF_COUNT); 6454 I40E_MAX_VF_COUNT);
6290 dev_info(&pf->pdev->dev,
6291 "Number of VFs being requested for PF[%d] = %d\n",
6292 pf->hw.pf_id, pf->num_req_vfs);
6293 } 6455 }
6294#endif /* CONFIG_PCI_IOV */ 6456#endif /* CONFIG_PCI_IOV */
6295 pf->eeprom_version = 0xDEAD; 6457 pf->eeprom_version = 0xDEAD;
@@ -6326,6 +6488,39 @@ sw_init_done:
6326} 6488}
6327 6489
6328/** 6490/**
6491 * i40e_set_ntuple - set the ntuple feature flag and take action
6492 * @pf: board private structure to initialize
6493 * @features: the feature set that the stack is suggesting
6494 *
6495 * returns a bool to indicate if reset needs to happen
6496 **/
6497bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
6498{
6499 bool need_reset = false;
6500
6501 /* Check if Flow Director n-tuple support was enabled or disabled. If
6502 * the state changed, we need to reset.
6503 */
6504 if (features & NETIF_F_NTUPLE) {
6505 /* Enable filters and mark for reset */
6506 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6507 need_reset = true;
6508 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6509 } else {
6510 /* turn off filters, mark for reset and clear SW filter list */
6511 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
6512 need_reset = true;
6513 i40e_fdir_filter_exit(pf);
6514 }
6515 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6516 /* if ATR was disabled it can be re-enabled. */
6517 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
6518 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6519 }
6520 return need_reset;
6521}
6522
6523/**
6329 * i40e_set_features - set the netdev feature flags 6524 * i40e_set_features - set the netdev feature flags
6330 * @netdev: ptr to the netdev being adjusted 6525 * @netdev: ptr to the netdev being adjusted
6331 * @features: the feature set that the stack is suggesting 6526 * @features: the feature set that the stack is suggesting
@@ -6335,12 +6530,19 @@ static int i40e_set_features(struct net_device *netdev,
6335{ 6530{
6336 struct i40e_netdev_priv *np = netdev_priv(netdev); 6531 struct i40e_netdev_priv *np = netdev_priv(netdev);
6337 struct i40e_vsi *vsi = np->vsi; 6532 struct i40e_vsi *vsi = np->vsi;
6533 struct i40e_pf *pf = vsi->back;
6534 bool need_reset;
6338 6535
6339 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6536 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6340 i40e_vlan_stripping_enable(vsi); 6537 i40e_vlan_stripping_enable(vsi);
6341 else 6538 else
6342 i40e_vlan_stripping_disable(vsi); 6539 i40e_vlan_stripping_disable(vsi);
6343 6540
6541 need_reset = i40e_set_ntuple(pf, features);
6542
6543 if (need_reset)
6544 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
6545
6344 return 0; 6546 return 0;
6345} 6547}
6346 6548
@@ -6464,6 +6666,7 @@ static const struct net_device_ops i40e_netdev_ops = {
6464 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 6666 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
6465 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 6667 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
6466 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6668 .ndo_get_vf_config = i40e_ndo_get_vf_config,
6669 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
6467#ifdef CONFIG_I40E_VXLAN 6670#ifdef CONFIG_I40E_VXLAN
6468 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6671 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6469 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6672 .ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -6495,10 +6698,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6495 np = netdev_priv(netdev); 6698 np = netdev_priv(netdev);
6496 np->vsi = vsi; 6699 np->vsi = vsi;
6497 6700
6498 netdev->hw_enc_features = NETIF_F_IP_CSUM | 6701 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6499 NETIF_F_GSO_UDP_TUNNEL | 6702 NETIF_F_GSO_UDP_TUNNEL |
6500 NETIF_F_TSO | 6703 NETIF_F_TSO;
6501 NETIF_F_SG;
6502 6704
6503 netdev->features = NETIF_F_SG | 6705 netdev->features = NETIF_F_SG |
6504 NETIF_F_IP_CSUM | 6706 NETIF_F_IP_CSUM |
@@ -6512,6 +6714,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6512 NETIF_F_TSO | 6714 NETIF_F_TSO |
6513 NETIF_F_TSO6 | 6715 NETIF_F_TSO6 |
6514 NETIF_F_RXCSUM | 6716 NETIF_F_RXCSUM |
6717 NETIF_F_NTUPLE |
6515 NETIF_F_RXHASH | 6718 NETIF_F_RXHASH |
6516 0; 6719 0;
6517 6720
@@ -6771,8 +6974,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
6771 if (vsi->netdev) { 6974 if (vsi->netdev) {
6772 /* results in a call to i40e_close() */ 6975 /* results in a call to i40e_close() */
6773 unregister_netdev(vsi->netdev); 6976 unregister_netdev(vsi->netdev);
6774 free_netdev(vsi->netdev);
6775 vsi->netdev = NULL;
6776 } 6977 }
6777 } else { 6978 } else {
6778 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 6979 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
@@ -6791,6 +6992,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
6791 6992
6792 i40e_vsi_delete(vsi); 6993 i40e_vsi_delete(vsi);
6793 i40e_vsi_free_q_vectors(vsi); 6994 i40e_vsi_free_q_vectors(vsi);
6995 if (vsi->netdev) {
6996 free_netdev(vsi->netdev);
6997 vsi->netdev = NULL;
6998 }
6794 i40e_vsi_clear_rings(vsi); 6999 i40e_vsi_clear_rings(vsi);
6795 i40e_vsi_clear(vsi); 7000 i40e_vsi_clear(vsi);
6796 7001
@@ -6845,13 +7050,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6845 } 7050 }
6846 7051
6847 if (vsi->base_vector) { 7052 if (vsi->base_vector) {
6848 dev_info(&pf->pdev->dev, 7053 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
6849 "VSI %d has non-zero base vector %d\n",
6850 vsi->seid, vsi->base_vector); 7054 vsi->seid, vsi->base_vector);
6851 return -EEXIST; 7055 return -EEXIST;
6852 } 7056 }
6853 7057
6854 ret = i40e_alloc_q_vectors(vsi); 7058 ret = i40e_vsi_alloc_q_vectors(vsi);
6855 if (ret) { 7059 if (ret) {
6856 dev_info(&pf->pdev->dev, 7060 dev_info(&pf->pdev->dev,
6857 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 7061 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
@@ -6865,7 +7069,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6865 vsi->num_q_vectors, vsi->idx); 7069 vsi->num_q_vectors, vsi->idx);
6866 if (vsi->base_vector < 0) { 7070 if (vsi->base_vector < 0) {
6867 dev_info(&pf->pdev->dev, 7071 dev_info(&pf->pdev->dev,
6868 "failed to get q tracking for VSI %d, err=%d\n", 7072 "failed to get queue tracking for VSI %d, err=%d\n",
6869 vsi->seid, vsi->base_vector); 7073 vsi->seid, vsi->base_vector);
6870 i40e_vsi_free_q_vectors(vsi); 7074 i40e_vsi_free_q_vectors(vsi);
6871 ret = -ENOENT; 7075 ret = -ENOENT;
@@ -7822,6 +8026,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
7822 return 0; 8026 return 0;
7823} 8027}
7824 8028
8029#define INFO_STRING_LEN 255
8030static void i40e_print_features(struct i40e_pf *pf)
8031{
8032 struct i40e_hw *hw = &pf->hw;
8033 char *buf, *string;
8034
8035 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
8036 if (!string) {
8037 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
8038 return;
8039 }
8040
8041 buf = string;
8042
8043 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
8044#ifdef CONFIG_PCI_IOV
8045 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
8046#endif
8047 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
8048 pf->vsi[pf->lan_vsi]->num_queue_pairs);
8049
8050 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8051 buf += sprintf(buf, "RSS ");
8052 buf += sprintf(buf, "FDir ");
8053 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8054 buf += sprintf(buf, "ATR ");
8055 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
8056 buf += sprintf(buf, "NTUPLE ");
8057 if (pf->flags & I40E_FLAG_DCB_ENABLED)
8058 buf += sprintf(buf, "DCB ");
8059 if (pf->flags & I40E_FLAG_PTP)
8060 buf += sprintf(buf, "PTP ");
8061
8062 BUG_ON(buf > (string + INFO_STRING_LEN));
8063 dev_info(&pf->pdev->dev, "%s\n", string);
8064 kfree(string);
8065}
8066
7825/** 8067/**
7826 * i40e_probe - Device initialization routine 8068 * i40e_probe - Device initialization routine
7827 * @pdev: PCI device information struct 8069 * @pdev: PCI device information struct
@@ -7848,17 +8090,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7848 return err; 8090 return err;
7849 8091
7850 /* set up for high or low dma */ 8092 /* set up for high or low dma */
7851 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 8093 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7852 /* coherent mask for the same size will always succeed if 8094 if (err) {
7853 * dma_set_mask does 8095 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7854 */ 8096 if (err) {
7855 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 8097 dev_err(&pdev->dev,
7856 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 8098 "DMA configuration failed: 0x%x\n", err);
7857 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 8099 goto err_dma;
7858 } else { 8100 }
7859 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
7860 err = -EIO;
7861 goto err_dma;
7862 } 8101 }
7863 8102
7864 /* set up pci connections */ 8103 /* set up pci connections */
@@ -7946,13 +8185,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7946 8185
7947 err = i40e_init_adminq(hw); 8186 err = i40e_init_adminq(hw);
7948 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); 8187 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7949 if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
7950 >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
7951 dev_info(&pdev->dev,
7952 "warning: NVM version not supported, supported version: %02x.%02x\n",
7953 I40E_CURRENT_NVM_VERSION_HI,
7954 I40E_CURRENT_NVM_VERSION_LO);
7955 }
7956 if (err) { 8188 if (err) {
7957 dev_info(&pdev->dev, 8189 dev_info(&pdev->dev,
7958 "init_adminq failed: %d expecting API %02x.%02x\n", 8190 "init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7961,6 +8193,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7961 goto err_pf_reset; 8193 goto err_pf_reset;
7962 } 8194 }
7963 8195
8196 i40e_verify_eeprom(pf);
8197
7964 i40e_clear_pxe_mode(hw); 8198 i40e_clear_pxe_mode(hw);
7965 err = i40e_get_capabilities(pf); 8199 err = i40e_get_capabilities(pf);
7966 if (err) 8200 if (err)
@@ -8062,7 +8296,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8062 8296
8063 /* prep for VF support */ 8297 /* prep for VF support */
8064 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 8298 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8065 (pf->flags & I40E_FLAG_MSIX_ENABLED)) { 8299 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8300 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
8066 u32 val; 8301 u32 val;
8067 8302
8068 /* disable link interrupts for VFs */ 8303 /* disable link interrupts for VFs */
@@ -8070,6 +8305,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8070 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 8305 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
8071 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 8306 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
8072 i40e_flush(hw); 8307 i40e_flush(hw);
8308
8309 if (pci_num_vf(pdev)) {
8310 dev_info(&pdev->dev,
8311 "Active VFs found, allocating resources.\n");
8312 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
8313 if (err)
8314 dev_info(&pdev->dev,
8315 "Error %d allocating resources for existing VFs\n",
8316 err);
8317 }
8073 } 8318 }
8074 8319
8075 pfs_found++; 8320 pfs_found++;
@@ -8092,7 +8337,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8092 8337
8093 i40e_set_pci_config_data(hw, link_status); 8338 i40e_set_pci_config_data(hw, link_status);
8094 8339
8095 dev_info(&pdev->dev, "PCI Express: %s %s\n", 8340 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
8096 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : 8341 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
8097 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 8342 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
8098 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 8343 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
@@ -8109,6 +8354,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8109 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 8354 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
8110 } 8355 }
8111 8356
8357 /* print a string summarizing features */
8358 i40e_print_features(pf);
8359
8112 return 0; 8360 return 0;
8113 8361
8114 /* Unwind what we've done if something failed in the setup */ 8362 /* Unwind what we've done if something failed in the setup */
@@ -8165,16 +8413,16 @@ static void i40e_remove(struct pci_dev *pdev)
8165 8413
8166 i40e_ptp_stop(pf); 8414 i40e_ptp_stop(pf);
8167 8415
8168 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8169 i40e_free_vfs(pf);
8170 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8171 }
8172
8173 /* no more scheduling of any task */ 8416 /* no more scheduling of any task */
8174 set_bit(__I40E_DOWN, &pf->state); 8417 set_bit(__I40E_DOWN, &pf->state);
8175 del_timer_sync(&pf->service_timer); 8418 del_timer_sync(&pf->service_timer);
8176 cancel_work_sync(&pf->service_task); 8419 cancel_work_sync(&pf->service_task);
8177 8420
8421 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8422 i40e_free_vfs(pf);
8423 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8424 }
8425
8178 i40e_fdir_teardown(pf); 8426 i40e_fdir_teardown(pf);
8179 8427
8180 /* If there is a switch structure or any orphans, remove them. 8428 /* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 73f95b081927..262bdf11d221 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -27,14 +27,14 @@
27#include "i40e_prototype.h" 27#include "i40e_prototype.h"
28 28
29/** 29/**
30 * i40e_init_nvm_ops - Initialize NVM function pointers. 30 * i40e_init_nvm_ops - Initialize NVM function pointers
31 * @hw: pointer to the HW structure. 31 * @hw: pointer to the HW structure
32 * 32 *
33 * Setups the function pointers and the NVM info structure. Should be called 33 * Setup the function pointers and the NVM info structure. Should be called
34 * once per NVM initialization, e.g. inside the i40e_init_shared_code(). 34 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
35 * Please notice that the NVM term is used here (& in all methods covered 35 * Please notice that the NVM term is used here (& in all methods covered
36 * in this file) as an equivalent of the FLASH part mapped into the SR. 36 * in this file) as an equivalent of the FLASH part mapped into the SR.
37 * We are accessing FLASH always thru the Shadow RAM. 37 * We are accessing FLASH always thru the Shadow RAM.
38 **/ 38 **/
39i40e_status i40e_init_nvm(struct i40e_hw *hw) 39i40e_status i40e_init_nvm(struct i40e_hw *hw)
40{ 40{
@@ -49,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
49 gens = rd32(hw, I40E_GLNVM_GENS); 49 gens = rd32(hw, I40E_GLNVM_GENS);
50 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> 50 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
51 I40E_GLNVM_GENS_SR_SIZE_SHIFT); 51 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
52 /* Switching to words (sr_size contains power of 2KB). */ 52 /* Switching to words (sr_size contains power of 2KB) */
53 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; 53 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
54 54
55 /* Check if we are in the normal or blank NVM programming mode. */ 55 /* Check if we are in the normal or blank NVM programming mode */
56 fla = rd32(hw, I40E_GLNVM_FLA); 56 fla = rd32(hw, I40E_GLNVM_FLA);
57 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */ 57 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
58 /* Max NVM timeout. */ 58 /* Max NVM timeout */
59 nvm->timeout = I40E_MAX_NVM_TIMEOUT; 59 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
60 nvm->blank_nvm_mode = false; 60 nvm->blank_nvm_mode = false;
61 } else { /* Blank programming mode. */ 61 } else { /* Blank programming mode */
62 nvm->blank_nvm_mode = true; 62 nvm->blank_nvm_mode = true;
63 ret_code = I40E_ERR_NVM_BLANK_MODE; 63 ret_code = I40E_ERR_NVM_BLANK_MODE;
64 hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); 64 hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
@@ -68,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
68} 68}
69 69
70/** 70/**
71 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership. 71 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
72 * @hw: pointer to the HW structure. 72 * @hw: pointer to the HW structure
73 * @access: NVM access type (read or write). 73 * @access: NVM access type (read or write)
74 * 74 *
75 * This function will request NVM ownership for reading 75 * This function will request NVM ownership for reading
76 * via the proper Admin Command. 76 * via the proper Admin Command.
77 **/ 77 **/
78i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 78i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
79 enum i40e_aq_resource_access_type access) 79 enum i40e_aq_resource_access_type access)
@@ -87,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
87 87
88 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 88 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
89 0, &time, NULL); 89 0, &time, NULL);
90 /* Reading the Global Device Timer. */ 90 /* Reading the Global Device Timer */
91 gtime = rd32(hw, I40E_GLVFGEN_TIMER); 91 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
92 92
93 /* Store the timeout. */ 93 /* Store the timeout */
94 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; 94 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
95 95
96 if (ret_code) { 96 if (ret_code) {
97 /* Set the polling timeout. */ 97 /* Set the polling timeout */
98 if (time > I40E_MAX_NVM_TIMEOUT) 98 if (time > I40E_MAX_NVM_TIMEOUT)
99 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) 99 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
100 + gtime; 100 + gtime;
101 else 101 else
102 timeout = hw->nvm.hw_semaphore_timeout; 102 timeout = hw->nvm.hw_semaphore_timeout;
103 /* Poll until the current NVM owner timeouts. */ 103 /* Poll until the current NVM owner timeouts */
104 while (gtime < timeout) { 104 while (gtime < timeout) {
105 usleep_range(10000, 20000); 105 usleep_range(10000, 20000);
106 ret_code = i40e_aq_request_resource(hw, 106 ret_code = i40e_aq_request_resource(hw,
@@ -128,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:
128} 128}
129 129
130/** 130/**
131 * i40e_release_nvm - Generic request for releasing the NVM ownership. 131 * i40e_release_nvm - Generic request for releasing the NVM ownership
132 * @hw: pointer to the HW structure. 132 * @hw: pointer to the HW structure
133 * 133 *
134 * This function will release NVM resource via the proper Admin Command. 134 * This function will release NVM resource via the proper Admin Command.
135 **/ 135 **/
136void i40e_release_nvm(struct i40e_hw *hw) 136void i40e_release_nvm(struct i40e_hw *hw)
137{ 137{
@@ -140,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)
140} 140}
141 141
142/** 142/**
143 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit. 143 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
144 * @hw: pointer to the HW structure. 144 * @hw: pointer to the HW structure
145 * 145 *
146 * Polls the SRCTL Shadow RAM register done bit. 146 * Polls the SRCTL Shadow RAM register done bit.
147 **/ 147 **/
148static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) 148static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
149{ 149{
150 i40e_status ret_code = I40E_ERR_TIMEOUT; 150 i40e_status ret_code = I40E_ERR_TIMEOUT;
151 u32 srctl, wait_cnt; 151 u32 srctl, wait_cnt;
152 152
153 /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */ 153 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
154 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { 154 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
155 srctl = rd32(hw, I40E_GLNVM_SRCTL); 155 srctl = rd32(hw, I40E_GLNVM_SRCTL);
156 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { 156 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
@@ -165,12 +165,12 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
165} 165}
166 166
167/** 167/**
168 * i40e_read_nvm_word - Reads Shadow RAM 168 * i40e_read_nvm_word - Reads Shadow RAM
169 * @hw: pointer to the HW structure. 169 * @hw: pointer to the HW structure
170 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 170 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
171 * @data: word read from the Shadow RAM. 171 * @data: word read from the Shadow RAM
172 * 172 *
173 * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 173 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
174 **/ 174 **/
175i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, 175i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
176 u16 *data) 176 u16 *data)
@@ -184,15 +184,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
184 goto read_nvm_exit; 184 goto read_nvm_exit;
185 } 185 }
186 186
187 /* Poll the done bit first. */ 187 /* Poll the done bit first */
188 ret_code = i40e_poll_sr_srctl_done_bit(hw); 188 ret_code = i40e_poll_sr_srctl_done_bit(hw);
189 if (!ret_code) { 189 if (!ret_code) {
190 /* Write the address and start reading. */ 190 /* Write the address and start reading */
191 sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | 191 sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
192 (1 << I40E_GLNVM_SRCTL_START_SHIFT); 192 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
193 wr32(hw, I40E_GLNVM_SRCTL, sr_reg); 193 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
194 194
195 /* Poll I40E_GLNVM_SRCTL until the done bit is set. */ 195 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
196 ret_code = i40e_poll_sr_srctl_done_bit(hw); 196 ret_code = i40e_poll_sr_srctl_done_bit(hw);
197 if (!ret_code) { 197 if (!ret_code) {
198 sr_reg = rd32(hw, I40E_GLNVM_SRDATA); 198 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
@@ -210,16 +210,15 @@ read_nvm_exit:
210} 210}
211 211
212/** 212/**
213 * i40e_read_nvm_buffer - Reads Shadow RAM buffer. 213 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
214 * @hw: pointer to the HW structure. 214 * @hw: pointer to the HW structure
215 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 215 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
216 * @words: number of words to read (in) & 216 * @words: (in) number of words to read; (out) number of words actually read
217 * number of words read before the NVM ownership timeout (out). 217 * @data: words read from the Shadow RAM
218 * @data: words read from the Shadow RAM.
219 * 218 *
220 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() 219 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
221 * method. The buffer read is preceded by the NVM ownership take 220 * method. The buffer read is preceded by the NVM ownership take
222 * and followed by the release. 221 * and followed by the release.
223 **/ 222 **/
224i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, 223i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
225 u16 *words, u16 *data) 224 u16 *words, u16 *data)
@@ -227,7 +226,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
227 i40e_status ret_code = 0; 226 i40e_status ret_code = 0;
228 u16 index, word; 227 u16 index, word;
229 228
230 /* Loop thru the selected region. */ 229 /* Loop thru the selected region */
231 for (word = 0; word < *words; word++) { 230 for (word = 0; word < *words; word++) {
232 index = offset + word; 231 index = offset + word;
233 ret_code = i40e_read_nvm_word(hw, index, &data[word]); 232 ret_code = i40e_read_nvm_word(hw, index, &data[word]);
@@ -235,21 +234,21 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
235 break; 234 break;
236 } 235 }
237 236
238 /* Update the number of words read from the Shadow RAM. */ 237 /* Update the number of words read from the Shadow RAM */
239 *words = word; 238 *words = word;
240 239
241 return ret_code; 240 return ret_code;
242} 241}
243 242
244/** 243/**
245 * i40e_calc_nvm_checksum - Calculates and returns the checksum 244 * i40e_calc_nvm_checksum - Calculates and returns the checksum
246 * @hw: pointer to hardware structure 245 * @hw: pointer to hardware structure
247 * @checksum: pointer to the checksum 246 * @checksum: pointer to the checksum
248 * 247 *
249 * This function calculate SW Checksum that covers the whole 64kB shadow RAM 248 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
250 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD 249 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
251 * is customer specific and unknown. Therefore, this function skips all maximum 250 * is customer specific and unknown. Therefore, this function skips all maximum
252 * possible size of VPD (1kB). 251 * possible size of VPD (1kB).
253 **/ 252 **/
254static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, 253static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
255 u16 *checksum) 254 u16 *checksum)
@@ -311,12 +310,12 @@ i40e_calc_nvm_checksum_exit:
311} 310}
312 311
313/** 312/**
314 * i40e_validate_nvm_checksum - Validate EEPROM checksum 313 * i40e_validate_nvm_checksum - Validate EEPROM checksum
315 * @hw: pointer to hardware structure 314 * @hw: pointer to hardware structure
316 * @checksum: calculated checksum 315 * @checksum: calculated checksum
317 * 316 *
318 * Performs checksum calculation and validates the NVM SW checksum. If the 317 * Performs checksum calculation and validates the NVM SW checksum. If the
319 * caller does not need checksum, the value can be NULL. 318 * caller does not need checksum, the value can be NULL.
320 **/ 319 **/
321i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, 320i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
322 u16 *checksum) 321 u16 *checksum)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ed91f93ede2b..9cd57e617959 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -231,6 +231,13 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
231 u16 *checksum); 231 u16 *checksum);
232void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); 232void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
233 233
234extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
235
236static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
237{
238 return i40e_ptype_lookup[ptype];
239}
240
234/* prototype for functions used for SW locks */ 241/* prototype for functions used for SW locks */
235 242
236/* i40e_common for VF drivers*/ 243/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4bb482b1a7f..0f5d96ad281d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27#include "i40e.h" 27#include "i40e.h"
28#include "i40e_prototype.h"
28 29
29static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, 30static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
30 u32 td_tag) 31 u32 td_tag)
@@ -39,11 +40,12 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
39#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) 40#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
40/** 41/**
41 * i40e_program_fdir_filter - Program a Flow Director filter 42 * i40e_program_fdir_filter - Program a Flow Director filter
42 * @fdir_input: Packet data that will be filter parameters 43 * @fdir_data: Packet data that will be filter parameters
44 * @raw_packet: the pre-allocated packet buffer for FDir
43 * @pf: The pf pointer 45 * @pf: The pf pointer
44 * @add: True for add/update, False for remove 46 * @add: True for add/update, False for remove
45 **/ 47 **/
46int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, 48int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
47 struct i40e_pf *pf, bool add) 49 struct i40e_pf *pf, bool add)
48{ 50{
49 struct i40e_filter_program_desc *fdir_desc; 51 struct i40e_filter_program_desc *fdir_desc;
@@ -68,8 +70,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
68 tx_ring = vsi->tx_rings[0]; 70 tx_ring = vsi->tx_rings[0];
69 dev = tx_ring->dev; 71 dev = tx_ring->dev;
70 72
71 dma = dma_map_single(dev, fdir_data->raw_packet, 73 dma = dma_map_single(dev, raw_packet,
72 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); 74 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
73 if (dma_mapping_error(dev, dma)) 75 if (dma_mapping_error(dev, dma))
74 goto dma_fail; 76 goto dma_fail;
75 77
@@ -132,14 +134,14 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
132 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0; 134 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
133 135
134 /* record length, and DMA address */ 136 /* record length, and DMA address */
135 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); 137 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
136 dma_unmap_addr_set(tx_buf, dma, dma); 138 dma_unmap_addr_set(tx_buf, dma, dma);
137 139
138 tx_desc->buffer_addr = cpu_to_le64(dma); 140 tx_desc->buffer_addr = cpu_to_le64(dma);
139 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; 141 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
140 142
141 tx_desc->cmd_type_offset_bsz = 143 tx_desc->cmd_type_offset_bsz =
142 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); 144 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
143 145
144 /* set the timestamp */ 146 /* set the timestamp */
145 tx_buf->time_stamp = jiffies; 147 tx_buf->time_stamp = jiffies;
@@ -161,26 +163,329 @@ dma_fail:
161 return -1; 163 return -1;
162} 164}
163 165
166#define IP_HEADER_OFFSET 14
167#define I40E_UDPIP_DUMMY_PACKET_LEN 42
168/**
169 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
170 * @vsi: pointer to the targeted VSI
171 * @fd_data: the flow director data required for the FDir descriptor
172 * @raw_packet: the pre-allocated packet buffer for FDir
173 * @add: true adds a filter, false removes it
174 *
175 * Returns 0 if the filters were successfully added or removed
176 **/
177static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
178 struct i40e_fdir_filter *fd_data,
179 u8 *raw_packet, bool add)
180{
181 struct i40e_pf *pf = vsi->back;
182 struct udphdr *udp;
183 struct iphdr *ip;
184 bool err = false;
185 int ret;
186 int i;
187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
190
191 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
192
193 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
194 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
195 + sizeof(struct iphdr));
196
197 ip->daddr = fd_data->dst_ip[0];
198 udp->dest = fd_data->dst_port;
199 ip->saddr = fd_data->src_ip[0];
200 udp->source = fd_data->src_port;
201
202 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
203 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
204 fd_data->pctype = i;
205 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
206
207 if (ret) {
208 dev_info(&pf->pdev->dev,
209 "Filter command send failed for PCTYPE %d (ret = %d)\n",
210 fd_data->pctype, ret);
211 err = true;
212 } else {
213 dev_info(&pf->pdev->dev,
214 "Filter OK for PCTYPE %d (ret = %d)\n",
215 fd_data->pctype, ret);
216 }
217 }
218
219 return err ? -EOPNOTSUPP : 0;
220}
221
222#define I40E_TCPIP_DUMMY_PACKET_LEN 54
223/**
224 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
225 * @vsi: pointer to the targeted VSI
226 * @fd_data: the flow director data required for the FDir descriptor
227 * @raw_packet: the pre-allocated packet buffer for FDir
228 * @add: true adds a filter, false removes it
229 *
230 * Returns 0 if the filters were successfully added or removed
231 **/
232static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
233 struct i40e_fdir_filter *fd_data,
234 u8 *raw_packet, bool add)
235{
236 struct i40e_pf *pf = vsi->back;
237 struct tcphdr *tcp;
238 struct iphdr *ip;
239 bool err = false;
240 int ret;
241 /* Dummy packet */
242 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
243 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
245 0x0, 0x72, 0, 0, 0, 0};
246
247 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
248
249 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
250 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
251 + sizeof(struct iphdr));
252
253 ip->daddr = fd_data->dst_ip[0];
254 tcp->dest = fd_data->dst_port;
255 ip->saddr = fd_data->src_ip[0];
256 tcp->source = fd_data->src_port;
257
258 if (add) {
259 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
260 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
261 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
262 }
263 }
264
265 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
266 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
267
268 if (ret) {
269 dev_info(&pf->pdev->dev,
270 "Filter command send failed for PCTYPE %d (ret = %d)\n",
271 fd_data->pctype, ret);
272 err = true;
273 } else {
274 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
275 fd_data->pctype, ret);
276 }
277
278 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
279
280 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
281 if (ret) {
282 dev_info(&pf->pdev->dev,
283 "Filter command send failed for PCTYPE %d (ret = %d)\n",
284 fd_data->pctype, ret);
285 err = true;
286 } else {
287 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
288 fd_data->pctype, ret);
289 }
290
291 return err ? -EOPNOTSUPP : 0;
292}
293
294/**
295 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
296 * a specific flow spec
297 * @vsi: pointer to the targeted VSI
298 * @fd_data: the flow director data required for the FDir descriptor
299 * @raw_packet: the pre-allocated packet buffer for FDir
300 * @add: true adds a filter, false removes it
301 *
302 * Always returns -EOPNOTSUPP
303 **/
304static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
305 struct i40e_fdir_filter *fd_data,
306 u8 *raw_packet, bool add)
307{
308 return -EOPNOTSUPP;
309}
310
311#define I40E_IP_DUMMY_PACKET_LEN 34
312/**
313 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
314 * a specific flow spec
315 * @vsi: pointer to the targeted VSI
316 * @fd_data: the flow director data required for the FDir descriptor
317 * @raw_packet: the pre-allocated packet buffer for FDir
318 * @add: true adds a filter, false removes it
319 *
320 * Returns 0 if the filters were successfully added or removed
321 **/
322static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
323 struct i40e_fdir_filter *fd_data,
324 u8 *raw_packet, bool add)
325{
326 struct i40e_pf *pf = vsi->back;
327 struct iphdr *ip;
328 bool err = false;
329 int ret;
330 int i;
331 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
332 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
333 0, 0, 0, 0};
334
335 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
336 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
337
338 ip->saddr = fd_data->src_ip[0];
339 ip->daddr = fd_data->dst_ip[0];
340 ip->protocol = 0;
341
342 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
343 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
344 fd_data->pctype = i;
345 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
346
347 if (ret) {
348 dev_info(&pf->pdev->dev,
349 "Filter command send failed for PCTYPE %d (ret = %d)\n",
350 fd_data->pctype, ret);
351 err = true;
352 } else {
353 dev_info(&pf->pdev->dev,
354 "Filter OK for PCTYPE %d (ret = %d)\n",
355 fd_data->pctype, ret);
356 }
357 }
358
359 return err ? -EOPNOTSUPP : 0;
360}
361
362/**
363 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
364 * @vsi: pointer to the targeted VSI
365 * @cmd: command to get or set RX flow classification rules
366 * @add: true adds a filter, false removes it
367 *
368 **/
369int i40e_add_del_fdir(struct i40e_vsi *vsi,
370 struct i40e_fdir_filter *input, bool add)
371{
372 struct i40e_pf *pf = vsi->back;
373 u8 *raw_packet;
374 int ret;
375
376 /* Populate the Flow Director that we have at the moment
377 * and allocate the raw packet buffer for the calling functions
378 */
379 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
380 if (!raw_packet)
381 return -ENOMEM;
382
383 switch (input->flow_type & ~FLOW_EXT) {
384 case TCP_V4_FLOW:
385 ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
386 add);
387 break;
388 case UDP_V4_FLOW:
389 ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
390 add);
391 break;
392 case SCTP_V4_FLOW:
393 ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
394 add);
395 break;
396 case IPV4_FLOW:
397 ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
398 add);
399 break;
400 case IP_USER_FLOW:
401 switch (input->ip4_proto) {
402 case IPPROTO_TCP:
403 ret = i40e_add_del_fdir_tcpv4(vsi, input,
404 raw_packet, add);
405 break;
406 case IPPROTO_UDP:
407 ret = i40e_add_del_fdir_udpv4(vsi, input,
408 raw_packet, add);
409 break;
410 case IPPROTO_SCTP:
411 ret = i40e_add_del_fdir_sctpv4(vsi, input,
412 raw_packet, add);
413 break;
414 default:
415 ret = i40e_add_del_fdir_ipv4(vsi, input,
416 raw_packet, add);
417 break;
418 }
419 break;
420 default:
421 dev_info(&pf->pdev->dev, "Could not specify spec type %d",
422 input->flow_type);
423 ret = -EINVAL;
424 }
425
426 kfree(raw_packet);
427 return ret;
428}
429
164/** 430/**
165 * i40e_fd_handle_status - check the Programming Status for FD 431 * i40e_fd_handle_status - check the Programming Status for FD
166 * @rx_ring: the Rx ring for this descriptor 432 * @rx_ring: the Rx ring for this descriptor
167 * @qw: the descriptor data 433 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
168 * @prog_id: the id originally used for programming 434 * @prog_id: the id originally used for programming
169 * 435 *
170 * This is used to verify if the FD programming or invalidation 436 * This is used to verify if the FD programming or invalidation
171 * requested by SW to the HW is successful or not and take actions accordingly. 437 * requested by SW to the HW is successful or not and take actions accordingly.
172 **/ 438 **/
173static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) 439static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
440 union i40e_rx_desc *rx_desc, u8 prog_id)
174{ 441{
175 struct pci_dev *pdev = rx_ring->vsi->back->pdev; 442 struct i40e_pf *pf = rx_ring->vsi->back;
443 struct pci_dev *pdev = pf->pdev;
444 u32 fcnt_prog, fcnt_avail;
176 u32 error; 445 u32 error;
446 u64 qw;
177 447
448 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
178 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> 449 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
179 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; 450 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
180 451
181 /* for now just print the Status */ 452 if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
182 dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n", 453 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
183 prog_id, error); 454 rx_desc->wb.qword0.hi_dword.fd_id);
455
456 /* filter programming failed most likely due to table full */
457 fcnt_prog = i40e_get_current_fd_count(pf);
458 fcnt_avail = pf->hw.fdir_shared_filter_count +
459 pf->fdir_pf_filter_count;
460
461 /* If ATR is running fcnt_prog can quickly change,
462 * if we are very close to full, it makes sense to disable
463 * FD ATR/SB and then re-enable it when there is room.
464 */
465 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
466 /* Turn off ATR first */
467 if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) {
468 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
469 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
470 pf->auto_disable_flags |=
471 I40E_FLAG_FD_ATR_ENABLED;
472 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
473 } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) {
474 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
475 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
476 pf->auto_disable_flags |=
477 I40E_FLAG_FD_SB_ENABLED;
478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
479 }
480 } else {
481 dev_info(&pdev->dev, "FD filter programming error");
482 }
483 } else if (error ==
484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
485 if (I40E_DEBUG_FD & pf->hw.debug_mask)
486 dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
487 rx_desc->wb.qword0.hi_dword.fd_id);
488 }
184} 489}
185 490
186/** 491/**
@@ -315,6 +620,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
315} 620}
316 621
317/** 622/**
623 * i40e_get_head - Retrieve head from head writeback
624 * @tx_ring: tx ring to fetch head of
625 *
626 * Returns value of Tx ring head based on value stored
627 * in head write-back location
628 **/
629static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
630{
631 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
632
633 return le32_to_cpu(*(volatile __le32 *)head);
634}
635
636/**
318 * i40e_clean_tx_irq - Reclaim resources after transmit completes 637 * i40e_clean_tx_irq - Reclaim resources after transmit completes
319 * @tx_ring: tx ring to clean 638 * @tx_ring: tx ring to clean
320 * @budget: how many cleans we're allowed 639 * @budget: how many cleans we're allowed
@@ -325,6 +644,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
325{ 644{
326 u16 i = tx_ring->next_to_clean; 645 u16 i = tx_ring->next_to_clean;
327 struct i40e_tx_buffer *tx_buf; 646 struct i40e_tx_buffer *tx_buf;
647 struct i40e_tx_desc *tx_head;
328 struct i40e_tx_desc *tx_desc; 648 struct i40e_tx_desc *tx_desc;
329 unsigned int total_packets = 0; 649 unsigned int total_packets = 0;
330 unsigned int total_bytes = 0; 650 unsigned int total_bytes = 0;
@@ -333,6 +653,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
333 tx_desc = I40E_TX_DESC(tx_ring, i); 653 tx_desc = I40E_TX_DESC(tx_ring, i);
334 i -= tx_ring->count; 654 i -= tx_ring->count;
335 655
656 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
657
336 do { 658 do {
337 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 659 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
338 660
@@ -343,9 +665,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
343 /* prevent any other reads prior to eop_desc */ 665 /* prevent any other reads prior to eop_desc */
344 read_barrier_depends(); 666 read_barrier_depends();
345 667
346 /* if the descriptor isn't done, no work yet to do */ 668 /* we have caught up to head, no work left to do */
347 if (!(eop_desc->cmd_type_offset_bsz & 669 if (tx_head == tx_desc)
348 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
349 break; 670 break;
350 671
351 /* clear next_to_watch to prevent false hangs */ 672 /* clear next_to_watch to prevent false hangs */
@@ -577,7 +898,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
577 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 898 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
578 899
579 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) 900 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
580 i40e_fd_handle_status(rx_ring, qw, id); 901 i40e_fd_handle_status(rx_ring, rx_desc, id);
581} 902}
582 903
583/** 904/**
@@ -601,6 +922,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
601 922
602 /* round up to nearest 4K */ 923 /* round up to nearest 4K */
603 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 924 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
925 /* add u32 for head writeback, align after this takes care of
926 * guaranteeing this is at least one cache line in size
927 */
928 tx_ring->size += sizeof(u32);
604 tx_ring->size = ALIGN(tx_ring->size, 4096); 929 tx_ring->size = ALIGN(tx_ring->size, 4096);
605 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 930 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
606 &tx_ring->dma, GFP_KERNEL); 931 &tx_ring->dma, GFP_KERNEL);
@@ -892,7 +1217,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
892 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1217 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
893 return; 1218 return;
894 1219
895 /* likely incorrect csum if alternate IP extention headers found */ 1220 /* likely incorrect csum if alternate IP extension headers found */
896 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1221 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
897 return; 1222 return;
898 1223
@@ -956,6 +1281,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
956} 1281}
957 1282
958/** 1283/**
1284 * i40e_ptype_to_hash - get a hash type
1285 * @ptype: the ptype value from the descriptor
1286 *
1287 * Returns a hash type to be used by skb_set_hash
1288 **/
1289static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1290{
1291 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1292
1293 if (!decoded.known)
1294 return PKT_HASH_TYPE_NONE;
1295
1296 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1297 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1298 return PKT_HASH_TYPE_L4;
1299 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1300 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1301 return PKT_HASH_TYPE_L3;
1302 else
1303 return PKT_HASH_TYPE_L2;
1304}
1305
1306/**
959 * i40e_clean_rx_irq - Reclaim resources after receive completes 1307 * i40e_clean_rx_irq - Reclaim resources after receive completes
960 * @rx_ring: rx ring to clean 1308 * @rx_ring: rx ring to clean
961 * @budget: how many cleans we're allowed 1309 * @budget: how many cleans we're allowed
@@ -972,8 +1320,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
972 u16 i = rx_ring->next_to_clean; 1320 u16 i = rx_ring->next_to_clean;
973 union i40e_rx_desc *rx_desc; 1321 union i40e_rx_desc *rx_desc;
974 u32 rx_error, rx_status; 1322 u32 rx_error, rx_status;
1323 u8 rx_ptype;
975 u64 qword; 1324 u64 qword;
976 u16 rx_ptype; 1325
1326 if (budget <= 0)
1327 return 0;
977 1328
978 rx_desc = I40E_RX_DESC(rx_ring, i); 1329 rx_desc = I40E_RX_DESC(rx_ring, i);
979 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1330 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -1087,7 +1438,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1087 goto next_desc; 1438 goto next_desc;
1088 } 1439 }
1089 1440
1090 skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); 1441 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1442 i40e_ptype_to_hash(rx_ptype));
1091 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { 1443 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1092 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & 1444 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1093 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1445 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1246,8 +1598,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1246 if (!tx_ring->atr_sample_rate) 1598 if (!tx_ring->atr_sample_rate)
1247 return; 1599 return;
1248 1600
1249 tx_ring->atr_count++;
1250
1251 /* snag network header to get L4 type and address */ 1601 /* snag network header to get L4 type and address */
1252 hdr.network = skb_network_header(skb); 1602 hdr.network = skb_network_header(skb);
1253 1603
@@ -1269,8 +1619,17 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1269 1619
1270 th = (struct tcphdr *)(hdr.network + hlen); 1620 th = (struct tcphdr *)(hdr.network + hlen);
1271 1621
1272 /* sample on all syn/fin packets or once every atr sample rate */ 1622 /* Due to lack of space, no more new filters can be programmed */
1273 if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate)) 1623 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1624 return;
1625
1626 tx_ring->atr_count++;
1627
1628 /* sample on all syn/fin/rst packets or once every atr sample rate */
1629 if (!th->fin &&
1630 !th->syn &&
1631 !th->rst &&
1632 (tx_ring->atr_count < tx_ring->atr_sample_rate))
1274 return; 1633 return;
1275 1634
1276 tx_ring->atr_count = 0; 1635 tx_ring->atr_count = 0;
@@ -1294,7 +1653,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1294 1653
1295 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; 1654 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
1296 1655
1297 dtype_cmd |= th->fin ? 1656 dtype_cmd |= (th->fin || th->rst) ?
1298 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << 1657 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1299 I40E_TXD_FLTR_QW1_PCMD_SHIFT) : 1658 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1300 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << 1659 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
@@ -1596,7 +1955,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1596 struct i40e_tx_context_desc *context_desc; 1955 struct i40e_tx_context_desc *context_desc;
1597 int i = tx_ring->next_to_use; 1956 int i = tx_ring->next_to_use;
1598 1957
1599 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1958 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1959 !cd_tunneling && !cd_l2tag2)
1600 return; 1960 return;
1601 1961
1602 /* grab the next descriptor */ 1962 /* grab the next descriptor */
@@ -1707,9 +2067,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1707 tx_bi = &tx_ring->tx_bi[i]; 2067 tx_bi = &tx_ring->tx_bi[i];
1708 } 2068 }
1709 2069
1710 tx_desc->cmd_type_offset_bsz = 2070 /* Place RS bit on last descriptor of any packet that spans across the
1711 build_ctob(td_cmd, td_offset, size, td_tag) | 2071 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
1712 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 2072 */
2073#define WB_STRIDE 0x3
2074 if (((i & WB_STRIDE) != WB_STRIDE) &&
2075 (first <= &tx_ring->tx_bi[i]) &&
2076 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
2077 tx_desc->cmd_type_offset_bsz =
2078 build_ctob(td_cmd, td_offset, size, td_tag) |
2079 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
2080 I40E_TXD_QW1_CMD_SHIFT);
2081 } else {
2082 tx_desc->cmd_type_offset_bsz =
2083 build_ctob(td_cmd, td_offset, size, td_tag) |
2084 cpu_to_le64((u64)I40E_TXD_CMD <<
2085 I40E_TXD_QW1_CMD_SHIFT);
2086 }
1713 2087
1714 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 2088 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1715 tx_ring->queue_index), 2089 tx_ring->queue_index),
@@ -1812,7 +2186,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1812 2186
1813 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 2187 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1814 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, 2188 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1815 * + 2 desc gap to keep tail from touching head, 2189 * + 4 desc gap to avoid the cache line where head is,
1816 * + 1 desc for context descriptor, 2190 * + 1 desc for context descriptor,
1817 * otherwise try next time 2191 * otherwise try next time
1818 */ 2192 */
@@ -1823,7 +2197,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1823 count += skb_shinfo(skb)->nr_frags; 2197 count += skb_shinfo(skb)->nr_frags;
1824#endif 2198#endif
1825 count += TXD_USE_COUNT(skb_headlen(skb)); 2199 count += TXD_USE_COUNT(skb_headlen(skb));
1826 if (i40e_maybe_stop_tx(tx_ring, count + 3)) { 2200 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1827 tx_ring->tx_stats.tx_busy++; 2201 tx_ring->tx_stats.tx_busy++;
1828 return 0; 2202 return 0;
1829 } 2203 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 181a825d3160..71a968fe557f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -91,6 +91,7 @@ enum i40e_debug_mask {
91 I40E_DEBUG_FLOW = 0x00000200, 91 I40E_DEBUG_FLOW = 0x00000200,
92 I40E_DEBUG_DCB = 0x00000400, 92 I40E_DEBUG_DCB = 0x00000400,
93 I40E_DEBUG_DIAG = 0x00000800, 93 I40E_DEBUG_DIAG = 0x00000800,
94 I40E_DEBUG_FD = 0x00001000,
94 95
95 I40E_DEBUG_AQ_MESSAGE = 0x01000000, 96 I40E_DEBUG_AQ_MESSAGE = 0x01000000,
96 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, 97 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -458,6 +459,10 @@ union i40e_32byte_rx_desc {
458 union { 459 union {
459 __le32 rss; /* RSS Hash */ 460 __le32 rss; /* RSS Hash */
460 __le32 fcoe_param; /* FCoE DDP Context id */ 461 __le32 fcoe_param; /* FCoE DDP Context id */
462 /* Flow director filter id in case of
463 * Programming status desc WB
464 */
465 __le32 fd_id;
461 } hi_dword; 466 } hi_dword;
462 } qword0; 467 } qword0;
463 struct { 468 struct {
@@ -698,7 +703,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
698enum i40e_rx_prog_status_desc_error_bits { 703enum i40e_rx_prog_status_desc_error_bits {
699 /* Note: These are predefined bit offsets */ 704 /* Note: These are predefined bit offsets */
700 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 705 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
701 I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 706 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
702 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 707 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
703 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 708 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
704}; 709};
@@ -1010,6 +1015,11 @@ struct i40e_hw_port_stats {
1010 u64 tx_size_big; /* ptc9522 */ 1015 u64 tx_size_big; /* ptc9522 */
1011 u64 mac_short_packet_dropped; /* mspdc */ 1016 u64 mac_short_packet_dropped; /* mspdc */
1012 u64 checksum_error; /* xec */ 1017 u64 checksum_error; /* xec */
1018 /* EEE LPI */
1019 bool tx_lpi_status;
1020 bool rx_lpi_status;
1021 u64 tx_lpi_count; /* etlpic */
1022 u64 rx_lpi_count; /* erlpic */
1013}; 1023};
1014 1024
1015/* Checksum and Shadow RAM pointers */ 1025/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b9d1c1c8ca5a..02c11a7f7d29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
69{ 69{
70 struct i40e_pf *pf = vf->pf; 70 struct i40e_pf *pf = vf->pf;
71 71
72 return vector_id <= pf->hw.func_caps.num_msix_vectors_vf; 72 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
73} 73}
74 74
75/***********************vf resource mgmt routines*****************/ 75/***********************vf resource mgmt routines*****************/
@@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
126 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 126 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
127 else 127 else
128 reg_idx = I40E_VPINT_LNKLSTN( 128 reg_idx = I40E_VPINT_LNKLSTN(
129 (pf->hw.func_caps.num_msix_vectors_vf 129 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
130 * vf->vf_id) + (vector_id - 1)); 130 (vector_id - 1));
131 131
132 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 132 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
133 /* Special case - No queues mapped on this vector */ 133 /* Special case - No queues mapped on this vector */
@@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
230 tx_ctx.qlen = info->ring_len; 230 tx_ctx.qlen = info->ring_len;
231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
232 tx_ctx.rdylist_act = 0; 232 tx_ctx.rdylist_act = 0;
233 tx_ctx.head_wb_ena = 1;
234 tx_ctx.head_wb_addr = info->dma_ring_addr +
235 (info->ring_len * sizeof(struct i40e_tx_desc));
233 236
234 /* clear the context in the HMC */ 237 /* clear the context in the HMC */
235 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 238 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -408,18 +411,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
408 "Could not allocate VF broadcast filter\n"); 411 "Could not allocate VF broadcast filter\n");
409 } 412 }
410 413
411 if (!f) {
412 dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
413 ret = -ENOMEM;
414 goto error_alloc_vsi_res;
415 }
416
417 /* program mac filter */ 414 /* program mac filter */
418 ret = i40e_sync_vsi_filters(vsi); 415 ret = i40e_sync_vsi_filters(vsi);
419 if (ret) { 416 if (ret)
420 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 417 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
421 goto error_alloc_vsi_res;
422 }
423 418
424error_alloc_vsi_res: 419error_alloc_vsi_res:
425 return ret; 420 return ret;
@@ -514,7 +509,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
514 vf->lan_vsi_index = 0; 509 vf->lan_vsi_index = 0;
515 vf->lan_vsi_id = 0; 510 vf->lan_vsi_id = 0;
516 } 511 }
517 msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1; 512 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
513
518 /* disable interrupts so the VF starts in a known state */ 514 /* disable interrupts so the VF starts in a known state */
519 for (i = 0; i < msix_vf; i++) { 515 for (i = 0; i < msix_vf; i++) {
520 /* format is same for both registers */ 516 /* format is same for both registers */
@@ -679,9 +675,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
679complete_reset: 675complete_reset:
680 /* reallocate vf resources to reset the VSI state */ 676 /* reallocate vf resources to reset the VSI state */
681 i40e_free_vf_res(vf); 677 i40e_free_vf_res(vf);
682 mdelay(10);
683 i40e_alloc_vf_res(vf); 678 i40e_alloc_vf_res(vf);
684 i40e_enable_vf_mappings(vf); 679 i40e_enable_vf_mappings(vf);
680 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
685 681
686 /* tell the VF the reset is done */ 682 /* tell the VF the reset is done */
687 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 683 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +843,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
847 * 843 *
848 * allocate vf resources 844 * allocate vf resources
849 **/ 845 **/
850static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 846int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
851{ 847{
852 struct i40e_vf *vfs; 848 struct i40e_vf *vfs;
853 int i, ret = 0; 849 int i, ret = 0;
@@ -855,16 +851,18 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
855 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 851 /* Disable interrupt 0 so we don't try to handle the VFLR. */
856 i40e_irq_dynamic_disable_icr0(pf); 852 i40e_irq_dynamic_disable_icr0(pf);
857 853
858 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 854 /* Check to see if we're just allocating resources for extant VFs */
859 if (ret) { 855 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
860 dev_err(&pf->pdev->dev, 856 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
861 "pci_enable_sriov failed with error %d!\n", ret); 857 if (ret) {
862 pf->num_alloc_vfs = 0; 858 dev_err(&pf->pdev->dev,
863 goto err_iov; 859 "Failed to enable SR-IOV, error %d.\n", ret);
860 pf->num_alloc_vfs = 0;
861 goto err_iov;
862 }
864 } 863 }
865
866 /* allocate memory */ 864 /* allocate memory */
867 vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL); 865 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
868 if (!vfs) { 866 if (!vfs) {
869 ret = -ENOMEM; 867 ret = -ENOMEM;
870 goto err_alloc; 868 goto err_alloc;
@@ -1776,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
1776 u32 v_retval, u8 *msg, u16 msglen) 1774 u32 v_retval, u8 *msg, u16 msglen)
1777{ 1775{
1778 struct i40e_hw *hw = &pf->hw; 1776 struct i40e_hw *hw = &pf->hw;
1779 int local_vf_id = vf_id - hw->func_caps.vf_base_id; 1777 unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
1780 struct i40e_vf *vf; 1778 struct i40e_vf *vf;
1781 int ret; 1779 int ret;
1782 1780
@@ -1873,7 +1871,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
1873 /* clear the bit in GLGEN_VFLRSTAT */ 1871 /* clear the bit in GLGEN_VFLRSTAT */
1874 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 1872 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
1875 1873
1876 i40e_reset_vf(vf, true); 1874 if (!test_bit(__I40E_DOWN, &pf->state))
1875 i40e_reset_vf(vf, true);
1877 } 1876 }
1878 } 1877 }
1879 1878
@@ -1924,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1924void i40e_vc_notify_link_state(struct i40e_pf *pf) 1923void i40e_vc_notify_link_state(struct i40e_pf *pf)
1925{ 1924{
1926 struct i40e_virtchnl_pf_event pfe; 1925 struct i40e_virtchnl_pf_event pfe;
1926 struct i40e_hw *hw = &pf->hw;
1927 struct i40e_vf *vf = pf->vf;
1928 struct i40e_link_status *ls = &pf->hw.phy.link_info;
1929 int i;
1927 1930
1928 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1931 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1929 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 1932 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1930 pfe.event_data.link_event.link_status = 1933 for (i = 0; i < pf->num_alloc_vfs; i++) {
1931 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 1934 if (vf->link_forced) {
1932 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; 1935 pfe.event_data.link_event.link_status = vf->link_up;
1933 1936 pfe.event_data.link_event.link_speed =
1934 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, 1937 (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
1935 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 1938 } else {
1939 pfe.event_data.link_event.link_status =
1940 ls->link_info & I40E_AQ_LINK_UP;
1941 pfe.event_data.link_event.link_speed = ls->link_speed;
1942 }
1943 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
1944 0, (u8 *)&pfe, sizeof(pfe),
1945 NULL);
1946 vf++;
1947 }
1936} 1948}
1937 1949
1938/** 1950/**
@@ -2197,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2197error_param: 2209error_param:
2198 return ret; 2210 return ret;
2199} 2211}
2212
2213/**
2214 * i40e_ndo_set_vf_link_state
2215 * @netdev: network interface device structure
2216 * @vf_id: vf identifier
2217 * @link: required link state
2218 *
2219 * Set the link state of a specified VF, regardless of physical link state
2220 **/
2221int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2222{
2223 struct i40e_netdev_priv *np = netdev_priv(netdev);
2224 struct i40e_pf *pf = np->vsi->back;
2225 struct i40e_virtchnl_pf_event pfe;
2226 struct i40e_hw *hw = &pf->hw;
2227 struct i40e_vf *vf;
2228 int ret = 0;
2229
2230 /* validate the request */
2231 if (vf_id >= pf->num_alloc_vfs) {
2232 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2233 ret = -EINVAL;
2234 goto error_out;
2235 }
2236
2237 vf = &pf->vf[vf_id];
2238
2239 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2240 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2241
2242 switch (link) {
2243 case IFLA_VF_LINK_STATE_AUTO:
2244 vf->link_forced = false;
2245 pfe.event_data.link_event.link_status =
2246 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2247 pfe.event_data.link_event.link_speed =
2248 pf->hw.phy.link_info.link_speed;
2249 break;
2250 case IFLA_VF_LINK_STATE_ENABLE:
2251 vf->link_forced = true;
2252 vf->link_up = true;
2253 pfe.event_data.link_event.link_status = true;
2254 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2255 break;
2256 case IFLA_VF_LINK_STATE_DISABLE:
2257 vf->link_forced = true;
2258 vf->link_up = false;
2259 pfe.event_data.link_event.link_status = false;
2260 pfe.event_data.link_event.link_speed = 0;
2261 break;
2262 default:
2263 ret = -EINVAL;
2264 goto error_out;
2265 }
2266 /* Notify the VF of its new link state */
2267 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2268 0, (u8 *)&pfe, sizeof(pfe), NULL);
2269
2270error_out:
2271 return ret;
2272}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index cc1feee36e12..389c47f396d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,10 +98,13 @@ struct i40e_vf {
98 98
99 unsigned long vf_caps; /* vf's adv. capabilities */ 99 unsigned long vf_caps; /* vf's adv. capabilities */
100 unsigned long vf_states; /* vf's runtime states */ 100 unsigned long vf_states; /* vf's runtime states */
101 bool link_forced;
102 bool link_up; /* only valid if vf link is forced */
101}; 103};
102 104
103void i40e_free_vfs(struct i40e_pf *pf); 105void i40e_free_vfs(struct i40e_pf *pf);
104int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); 106int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
107int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
105int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, 108int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
106 u32 v_retval, u8 *msg, u16 msglen); 109 u32 v_retval, u8 *msg, u16 msglen);
107int i40e_vc_process_vflr_event(struct i40e_pf *pf); 110int i40e_vc_process_vflr_event(struct i40e_pf *pf);
@@ -115,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
115int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); 118int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
116int i40e_ndo_get_vf_config(struct net_device *netdev, 119int i40e_ndo_get_vf_config(struct net_device *netdev,
117 int vf_id, struct ifla_vf_info *ivi); 120 int vf_id, struct ifla_vf_info *ivi);
121int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
122
118void i40e_vc_notify_link_state(struct i40e_pf *pf); 123void i40e_vc_notify_link_state(struct i40e_pf *pf);
119void i40e_vc_notify_reset(struct i40e_pf *pf); 124void i40e_vc_notify_reset(struct i40e_pf *pf);
120 125
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1229#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 1229#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
1230#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 1230#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
1231 1231
1232 __le32 tenant_id ; 1232 __le32 tenant_id;
1233 u8 reserved[4]; 1233 u8 reserved[4];
1234 __le16 queue_number; 1234 __le16 queue_number;
1235#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1235#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7b13953b28c4..ae084378faab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
160} 160}
161 161
162 162
163/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
164 * hardware to a bit-field that can be used by SW to more easily determine the
165 * packet type.
166 *
167 * Macros are used to shorten the table lines and make this table human
168 * readable.
169 *
170 * We store the PTYPE in the top byte of the bit field - this is just so that
171 * we can check that the table doesn't have a row missing, as the index into
172 * the table should be the PTYPE.
173 *
174 * Typical work flow:
175 *
176 * IF NOT i40evf_ptype_lookup[ptype].known
177 * THEN
178 * Packet is unknown
179 * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
180 * Use the rest of the fields to look at the tunnels, inner protocols, etc
181 * ELSE
182 * Use the enum i40e_rx_l2_ptype to decode the packet type
183 * ENDIF
184 */
185
186/* macro to make the table lines short */
187#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
188 { PTYPE, \
189 1, \
190 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
191 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
192 I40E_RX_PTYPE_##OUTER_FRAG, \
193 I40E_RX_PTYPE_TUNNEL_##T, \
194 I40E_RX_PTYPE_TUNNEL_END_##TE, \
195 I40E_RX_PTYPE_##TEF, \
196 I40E_RX_PTYPE_INNER_PROT_##I, \
197 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
198
199#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
200 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
201
202/* shorter macros makes the table fit but are terse */
203#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
204#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
205#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
206
207/* Lookup table mapping the HW PTYPE to the bit field for decoding */
208struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
209 /* L2 Packet types */
210 I40E_PTT_UNUSED_ENTRY(0),
211 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
212 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
213 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
214 I40E_PTT_UNUSED_ENTRY(4),
215 I40E_PTT_UNUSED_ENTRY(5),
216 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
217 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
218 I40E_PTT_UNUSED_ENTRY(8),
219 I40E_PTT_UNUSED_ENTRY(9),
220 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
221 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
222 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
223 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
224 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
225 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
226 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
227 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
228 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
229 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
230 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
231 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
232
233 /* Non Tunneled IPv4 */
234 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
235 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
236 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
237 I40E_PTT_UNUSED_ENTRY(25),
238 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
239 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
240 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
241
242 /* IPv4 --> IPv4 */
243 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
244 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
245 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
246 I40E_PTT_UNUSED_ENTRY(32),
247 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
248 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
249 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
250
251 /* IPv4 --> IPv6 */
252 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
253 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
254 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
255 I40E_PTT_UNUSED_ENTRY(39),
256 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
257 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
258 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
259
260 /* IPv4 --> GRE/NAT */
261 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
262
263 /* IPv4 --> GRE/NAT --> IPv4 */
264 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
265 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
266 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
267 I40E_PTT_UNUSED_ENTRY(47),
268 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
269 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
270 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
271
272 /* IPv4 --> GRE/NAT --> IPv6 */
273 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
274 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
275 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
276 I40E_PTT_UNUSED_ENTRY(54),
277 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
278 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
279 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
280
281 /* IPv4 --> GRE/NAT --> MAC */
282 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
283
284 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
285 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
286 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
287 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
288 I40E_PTT_UNUSED_ENTRY(62),
289 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
290 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
291 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
292
293 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
294 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
295 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
296 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
297 I40E_PTT_UNUSED_ENTRY(69),
298 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
299 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
300 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
301
302 /* IPv4 --> GRE/NAT --> MAC/VLAN */
303 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
304
305 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
306 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
307 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
308 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
309 I40E_PTT_UNUSED_ENTRY(77),
310 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
311 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
312 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
313
314 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
315 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
316 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
317 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
318 I40E_PTT_UNUSED_ENTRY(84),
319 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
320 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
321 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
322
323 /* Non Tunneled IPv6 */
324 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
325 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
326 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
327 I40E_PTT_UNUSED_ENTRY(91),
328 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
329 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
330 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
331
332 /* IPv6 --> IPv4 */
333 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
334 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
335 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
336 I40E_PTT_UNUSED_ENTRY(98),
337 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
338 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
339 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
340
341 /* IPv6 --> IPv6 */
342 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
343 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
344 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
345 I40E_PTT_UNUSED_ENTRY(105),
346 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
347 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
348 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
349
350 /* IPv6 --> GRE/NAT */
351 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
352
353 /* IPv6 --> GRE/NAT -> IPv4 */
354 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
355 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
356 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
357 I40E_PTT_UNUSED_ENTRY(113),
358 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
359 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
360 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
361
362 /* IPv6 --> GRE/NAT -> IPv6 */
363 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
364 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
365 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
366 I40E_PTT_UNUSED_ENTRY(120),
367 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
368 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
369 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
370
371 /* IPv6 --> GRE/NAT -> MAC */
372 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
373
374 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
375 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
376 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
377 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
378 I40E_PTT_UNUSED_ENTRY(128),
379 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
380 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
381 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
382
383 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
384 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
385 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
386 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
387 I40E_PTT_UNUSED_ENTRY(135),
388 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
389 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
390 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
391
392 /* IPv6 --> GRE/NAT -> MAC/VLAN */
393 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
394
395 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
396 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
397 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
398 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
399 I40E_PTT_UNUSED_ENTRY(143),
400 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
401 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
402 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
403
404 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
405 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
406 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
407 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
408 I40E_PTT_UNUSED_ENTRY(150),
409 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
410 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
411 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
412
413 /* unused entries */
414 I40E_PTT_UNUSED_ENTRY(154),
415 I40E_PTT_UNUSED_ENTRY(155),
416 I40E_PTT_UNUSED_ENTRY(156),
417 I40E_PTT_UNUSED_ENTRY(157),
418 I40E_PTT_UNUSED_ENTRY(158),
419 I40E_PTT_UNUSED_ENTRY(159),
420
421 I40E_PTT_UNUSED_ENTRY(160),
422 I40E_PTT_UNUSED_ENTRY(161),
423 I40E_PTT_UNUSED_ENTRY(162),
424 I40E_PTT_UNUSED_ENTRY(163),
425 I40E_PTT_UNUSED_ENTRY(164),
426 I40E_PTT_UNUSED_ENTRY(165),
427 I40E_PTT_UNUSED_ENTRY(166),
428 I40E_PTT_UNUSED_ENTRY(167),
429 I40E_PTT_UNUSED_ENTRY(168),
430 I40E_PTT_UNUSED_ENTRY(169),
431
432 I40E_PTT_UNUSED_ENTRY(170),
433 I40E_PTT_UNUSED_ENTRY(171),
434 I40E_PTT_UNUSED_ENTRY(172),
435 I40E_PTT_UNUSED_ENTRY(173),
436 I40E_PTT_UNUSED_ENTRY(174),
437 I40E_PTT_UNUSED_ENTRY(175),
438 I40E_PTT_UNUSED_ENTRY(176),
439 I40E_PTT_UNUSED_ENTRY(177),
440 I40E_PTT_UNUSED_ENTRY(178),
441 I40E_PTT_UNUSED_ENTRY(179),
442
443 I40E_PTT_UNUSED_ENTRY(180),
444 I40E_PTT_UNUSED_ENTRY(181),
445 I40E_PTT_UNUSED_ENTRY(182),
446 I40E_PTT_UNUSED_ENTRY(183),
447 I40E_PTT_UNUSED_ENTRY(184),
448 I40E_PTT_UNUSED_ENTRY(185),
449 I40E_PTT_UNUSED_ENTRY(186),
450 I40E_PTT_UNUSED_ENTRY(187),
451 I40E_PTT_UNUSED_ENTRY(188),
452 I40E_PTT_UNUSED_ENTRY(189),
453
454 I40E_PTT_UNUSED_ENTRY(190),
455 I40E_PTT_UNUSED_ENTRY(191),
456 I40E_PTT_UNUSED_ENTRY(192),
457 I40E_PTT_UNUSED_ENTRY(193),
458 I40E_PTT_UNUSED_ENTRY(194),
459 I40E_PTT_UNUSED_ENTRY(195),
460 I40E_PTT_UNUSED_ENTRY(196),
461 I40E_PTT_UNUSED_ENTRY(197),
462 I40E_PTT_UNUSED_ENTRY(198),
463 I40E_PTT_UNUSED_ENTRY(199),
464
465 I40E_PTT_UNUSED_ENTRY(200),
466 I40E_PTT_UNUSED_ENTRY(201),
467 I40E_PTT_UNUSED_ENTRY(202),
468 I40E_PTT_UNUSED_ENTRY(203),
469 I40E_PTT_UNUSED_ENTRY(204),
470 I40E_PTT_UNUSED_ENTRY(205),
471 I40E_PTT_UNUSED_ENTRY(206),
472 I40E_PTT_UNUSED_ENTRY(207),
473 I40E_PTT_UNUSED_ENTRY(208),
474 I40E_PTT_UNUSED_ENTRY(209),
475
476 I40E_PTT_UNUSED_ENTRY(210),
477 I40E_PTT_UNUSED_ENTRY(211),
478 I40E_PTT_UNUSED_ENTRY(212),
479 I40E_PTT_UNUSED_ENTRY(213),
480 I40E_PTT_UNUSED_ENTRY(214),
481 I40E_PTT_UNUSED_ENTRY(215),
482 I40E_PTT_UNUSED_ENTRY(216),
483 I40E_PTT_UNUSED_ENTRY(217),
484 I40E_PTT_UNUSED_ENTRY(218),
485 I40E_PTT_UNUSED_ENTRY(219),
486
487 I40E_PTT_UNUSED_ENTRY(220),
488 I40E_PTT_UNUSED_ENTRY(221),
489 I40E_PTT_UNUSED_ENTRY(222),
490 I40E_PTT_UNUSED_ENTRY(223),
491 I40E_PTT_UNUSED_ENTRY(224),
492 I40E_PTT_UNUSED_ENTRY(225),
493 I40E_PTT_UNUSED_ENTRY(226),
494 I40E_PTT_UNUSED_ENTRY(227),
495 I40E_PTT_UNUSED_ENTRY(228),
496 I40E_PTT_UNUSED_ENTRY(229),
497
498 I40E_PTT_UNUSED_ENTRY(230),
499 I40E_PTT_UNUSED_ENTRY(231),
500 I40E_PTT_UNUSED_ENTRY(232),
501 I40E_PTT_UNUSED_ENTRY(233),
502 I40E_PTT_UNUSED_ENTRY(234),
503 I40E_PTT_UNUSED_ENTRY(235),
504 I40E_PTT_UNUSED_ENTRY(236),
505 I40E_PTT_UNUSED_ENTRY(237),
506 I40E_PTT_UNUSED_ENTRY(238),
507 I40E_PTT_UNUSED_ENTRY(239),
508
509 I40E_PTT_UNUSED_ENTRY(240),
510 I40E_PTT_UNUSED_ENTRY(241),
511 I40E_PTT_UNUSED_ENTRY(242),
512 I40E_PTT_UNUSED_ENTRY(243),
513 I40E_PTT_UNUSED_ENTRY(244),
514 I40E_PTT_UNUSED_ENTRY(245),
515 I40E_PTT_UNUSED_ENTRY(246),
516 I40E_PTT_UNUSED_ENTRY(247),
517 I40E_PTT_UNUSED_ENTRY(248),
518 I40E_PTT_UNUSED_ENTRY(249),
519
520 I40E_PTT_UNUSED_ENTRY(250),
521 I40E_PTT_UNUSED_ENTRY(251),
522 I40E_PTT_UNUSED_ENTRY(252),
523 I40E_PTT_UNUSED_ENTRY(253),
524 I40E_PTT_UNUSED_ENTRY(254),
525 I40E_PTT_UNUSED_ENTRY(255)
526};
527
528
163/** 529/**
164 * i40e_aq_send_msg_to_pf 530 * i40e_aq_send_msg_to_pf
165 * @hw: pointer to the hardware structure 531 * @hw: pointer to the hardware structure
@@ -199,8 +565,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
199 details.async = true; 565 details.async = true;
200 cmd_details = &details; 566 cmd_details = &details;
201 } 567 }
202 status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, 568 status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
203 msglen, cmd_details);
204 return status; 569 return status;
205} 570}
206 571
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 7841573a58c9..97ab8c2b76f8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
63 63
64i40e_status i40e_set_mac_type(struct i40e_hw *hw); 64i40e_status i40e_set_mac_type(struct i40e_hw *hw);
65 65
66extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
67
68static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
69{
70 return i40evf_ptype_lookup[ptype];
71}
72
66/* prototype for functions used for SW locks */ 73/* prototype for functions used for SW locks */
67 74
68/* i40e_common for VF drivers*/ 75/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d853db..53be5f44d015 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
24#include <linux/prefetch.h> 24#include <linux/prefetch.h>
25 25
26#include "i40evf.h" 26#include "i40evf.h"
27#include "i40e_prototype.h"
27 28
28static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, 29static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
29 u32 td_tag) 30 u32 td_tag)
@@ -169,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
169} 170}
170 171
171/** 172/**
173 * i40e_get_head - Retrieve head from head writeback
174 * @tx_ring: tx ring to fetch head of
175 *
176 * Returns value of Tx ring head based on value stored
177 * in head write-back location
178 **/
179static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
180{
181 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
182
183 return le32_to_cpu(*(volatile __le32 *)head);
184}
185
186/**
172 * i40e_clean_tx_irq - Reclaim resources after transmit completes 187 * i40e_clean_tx_irq - Reclaim resources after transmit completes
173 * @tx_ring: tx ring to clean 188 * @tx_ring: tx ring to clean
174 * @budget: how many cleans we're allowed 189 * @budget: how many cleans we're allowed
@@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
179{ 194{
180 u16 i = tx_ring->next_to_clean; 195 u16 i = tx_ring->next_to_clean;
181 struct i40e_tx_buffer *tx_buf; 196 struct i40e_tx_buffer *tx_buf;
197 struct i40e_tx_desc *tx_head;
182 struct i40e_tx_desc *tx_desc; 198 struct i40e_tx_desc *tx_desc;
183 unsigned int total_packets = 0; 199 unsigned int total_packets = 0;
184 unsigned int total_bytes = 0; 200 unsigned int total_bytes = 0;
@@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
187 tx_desc = I40E_TX_DESC(tx_ring, i); 203 tx_desc = I40E_TX_DESC(tx_ring, i);
188 i -= tx_ring->count; 204 i -= tx_ring->count;
189 205
206 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
207
190 do { 208 do {
191 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 209 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
192 210
@@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
197 /* prevent any other reads prior to eop_desc */ 215 /* prevent any other reads prior to eop_desc */
198 read_barrier_depends(); 216 read_barrier_depends();
199 217
200 /* if the descriptor isn't done, no work yet to do */ 218 /* we have caught up to head, no work left to do */
201 if (!(eop_desc->cmd_type_offset_bsz & 219 if (tx_head == tx_desc)
202 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
203 break; 220 break;
204 221
205 /* clear next_to_watch to prevent false hangs */ 222 /* clear next_to_watch to prevent false hangs */
@@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
431 448
432 /* round up to nearest 4K */ 449 /* round up to nearest 4K */
433 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 450 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
451 /* add u32 for head writeback, align after this takes care of
452 * guaranteeing this is at least one cache line in size
453 */
454 tx_ring->size += sizeof(u32);
434 tx_ring->size = ALIGN(tx_ring->size, 4096); 455 tx_ring->size = ALIGN(tx_ring->size, 4096);
435 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 456 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
436 &tx_ring->dma, GFP_KERNEL); 457 &tx_ring->dma, GFP_KERNEL);
@@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
722 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 743 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
723 return; 744 return;
724 745
725 /* likely incorrect csum if alternate IP extention headers found */ 746 /* likely incorrect csum if alternate IP extension headers found */
726 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 747 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
727 return; 748 return;
728 749
@@ -786,6 +807,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
786} 807}
787 808
788/** 809/**
810 * i40e_ptype_to_hash - get a hash type
811 * @ptype: the ptype value from the descriptor
812 *
813 * Returns a hash type to be used by skb_set_hash
814 **/
815static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
816{
817 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
818
819 if (!decoded.known)
820 return PKT_HASH_TYPE_NONE;
821
822 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
823 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
824 return PKT_HASH_TYPE_L4;
825 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
826 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
827 return PKT_HASH_TYPE_L3;
828 else
829 return PKT_HASH_TYPE_L2;
830}
831
832/**
789 * i40e_clean_rx_irq - Reclaim resources after receive completes 833 * i40e_clean_rx_irq - Reclaim resources after receive completes
790 * @rx_ring: rx ring to clean 834 * @rx_ring: rx ring to clean
791 * @budget: how many cleans we're allowed 835 * @budget: how many cleans we're allowed
@@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
802 u16 i = rx_ring->next_to_clean; 846 u16 i = rx_ring->next_to_clean;
803 union i40e_rx_desc *rx_desc; 847 union i40e_rx_desc *rx_desc;
804 u32 rx_error, rx_status; 848 u32 rx_error, rx_status;
849 u8 rx_ptype;
805 u64 qword; 850 u64 qword;
806 u16 rx_ptype;
807 851
808 rx_desc = I40E_RX_DESC(rx_ring, i); 852 rx_desc = I40E_RX_DESC(rx_ring, i);
809 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 853 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
810 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) 854 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
811 >> I40E_RXD_QW1_STATUS_SHIFT; 855 I40E_RXD_QW1_STATUS_SHIFT;
812 856
813 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { 857 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
814 union i40e_rx_desc *next_rxd; 858 union i40e_rx_desc *next_rxd;
@@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
912 goto next_desc; 956 goto next_desc;
913 } 957 }
914 958
915 skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); 959 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
960 i40e_ptype_to_hash(rx_ptype));
916 /* probably a little skewed due to removing CRC */ 961 /* probably a little skewed due to removing CRC */
917 total_rx_bytes += skb->len; 962 total_rx_bytes += skb->len;
918 total_rx_packets++; 963 total_rx_packets++;
@@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1241 struct i40e_tx_context_desc *context_desc; 1286 struct i40e_tx_context_desc *context_desc;
1242 int i = tx_ring->next_to_use; 1287 int i = tx_ring->next_to_use;
1243 1288
1244 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1289 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1290 !cd_tunneling && !cd_l2tag2)
1245 return; 1291 return;
1246 1292
1247 /* grab the next descriptor */ 1293 /* grab the next descriptor */
@@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1352 tx_bi = &tx_ring->tx_bi[i]; 1398 tx_bi = &tx_ring->tx_bi[i];
1353 } 1399 }
1354 1400
1355 tx_desc->cmd_type_offset_bsz = 1401 /* Place RS bit on last descriptor of any packet that spans across the
1356 build_ctob(td_cmd, td_offset, size, td_tag) | 1402 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
1357 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 1403 */
1404#define WB_STRIDE 0x3
1405 if (((i & WB_STRIDE) != WB_STRIDE) &&
1406 (first <= &tx_ring->tx_bi[i]) &&
1407 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
1408 tx_desc->cmd_type_offset_bsz =
1409 build_ctob(td_cmd, td_offset, size, td_tag) |
1410 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
1411 I40E_TXD_QW1_CMD_SHIFT);
1412 } else {
1413 tx_desc->cmd_type_offset_bsz =
1414 build_ctob(td_cmd, td_offset, size, td_tag) |
1415 cpu_to_le64((u64)I40E_TXD_CMD <<
1416 I40E_TXD_QW1_CMD_SHIFT);
1417 }
1358 1418
1359 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 1419 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1360 tx_ring->queue_index), 1420 tx_ring->queue_index),
@@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1457 1517
1458 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 1518 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1459 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, 1519 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1460 * + 2 desc gap to keep tail from touching head, 1520 * + 4 desc gap to avoid the cache line where head is,
1461 * + 1 desc for context descriptor, 1521 * + 1 desc for context descriptor,
1462 * otherwise try next time 1522 * otherwise try next time
1463 */ 1523 */
@@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1468 count += skb_shinfo(skb)->nr_frags; 1528 count += skb_shinfo(skb)->nr_frags;
1469#endif 1529#endif
1470 count += TXD_USE_COUNT(skb_headlen(skb)); 1530 count += TXD_USE_COUNT(skb_headlen(skb));
1471 if (i40e_maybe_stop_tx(tx_ring, count + 3)) { 1531 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1472 tx_ring->tx_stats.tx_busy++; 1532 tx_ring->tx_stats.tx_busy++;
1473 return 0; 1533 return 0;
1474 } 1534 }
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac06592f..4673b3381edd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
64struct i40e_hw; 64struct i40e_hw;
65typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); 65typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
66 66
67#define ETH_ALEN 6
68
69/* Data type manipulation macros. */ 67/* Data type manipulation macros. */
70 68
71#define I40E_DESC_UNUSED(R) \ 69#define I40E_DESC_UNUSED(R) \
@@ -90,6 +88,7 @@ enum i40e_debug_mask {
90 I40E_DEBUG_FLOW = 0x00000200, 88 I40E_DEBUG_FLOW = 0x00000200,
91 I40E_DEBUG_DCB = 0x00000400, 89 I40E_DEBUG_DCB = 0x00000400,
92 I40E_DEBUG_DIAG = 0x00000800, 90 I40E_DEBUG_DIAG = 0x00000800,
91 I40E_DEBUG_FD = 0x00001000,
93 92
94 I40E_DEBUG_AQ_MESSAGE = 0x01000000, 93 I40E_DEBUG_AQ_MESSAGE = 0x01000000,
95 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, 94 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -466,6 +465,10 @@ union i40e_32byte_rx_desc {
466 union { 465 union {
467 __le32 rss; /* RSS Hash */ 466 __le32 rss; /* RSS Hash */
468 __le32 fcoe_param; /* FCoE DDP Context id */ 467 __le32 fcoe_param; /* FCoE DDP Context id */
468 /* Flow director filter id in case of
469 * Programming status desc WB
470 */
471 __le32 fd_id;
469 } hi_dword; 472 } hi_dword;
470 } qword0; 473 } qword0;
471 struct { 474 struct {
@@ -706,7 +709,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
706enum i40e_rx_prog_status_desc_error_bits { 709enum i40e_rx_prog_status_desc_error_bits {
707 /* Note: These are predefined bit offsets */ 710 /* Note: These are predefined bit offsets */
708 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 711 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
709 I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 712 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
710 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 713 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
711 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 714 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
712}; 715};
@@ -1018,6 +1021,11 @@ struct i40e_hw_port_stats {
1018 u64 tx_size_big; /* ptc9522 */ 1021 u64 tx_size_big; /* ptc9522 */
1019 u64 mac_short_packet_dropped; /* mspdc */ 1022 u64 mac_short_packet_dropped; /* mspdc */
1020 u64 checksum_error; /* xec */ 1023 u64 checksum_error; /* xec */
1024 /* EEE LPI */
1025 bool tx_lpi_status;
1026 bool rx_lpi_status;
1027 u64 tx_lpi_count; /* etlpic */
1028 u64 rx_lpi_count; /* erlpic */
1021}; 1029};
1022 1030
1023/* Checksum and Shadow RAM pointers */ 1031/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b288a1..807807d62387 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
38#include <linux/ipv6.h> 38#include <linux/ipv6.h>
39#include <net/ip6_checksum.h> 39#include <net/ip6_checksum.h>
40#include <net/udp.h> 40#include <net/udp.h>
41#include <linux/sctp.h>
42
43 41
44#include "i40e_type.h" 42#include "i40e_type.h"
45#include "i40e_virtchnl.h" 43#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
164/* Driver state. The order of these is important! */ 162/* Driver state. The order of these is important! */
165enum i40evf_state_t { 163enum i40evf_state_t {
166 __I40EVF_STARTUP, /* driver loaded, probe complete */ 164 __I40EVF_STARTUP, /* driver loaded, probe complete */
167 __I40EVF_FAILED, /* PF communication failed. Fatal. */
168 __I40EVF_REMOVE, /* driver is being unloaded */ 165 __I40EVF_REMOVE, /* driver is being unloaded */
169 __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ 166 __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
170 __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ 167 __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
171 __I40EVF_INIT_SW, /* got resources, setting up structs */ 168 __I40EVF_INIT_SW, /* got resources, setting up structs */
169 __I40EVF_RESETTING, /* in reset */
172 /* Below here, watchdog is running */ 170 /* Below here, watchdog is running */
173 __I40EVF_DOWN, /* ready, can be opened */ 171 __I40EVF_DOWN, /* ready, can be opened */
174 __I40EVF_TESTING, /* in ethtool self-test */ 172 __I40EVF_TESTING, /* in ethtool self-test */
175 __I40EVF_RESETTING, /* in reset */
176 __I40EVF_RUNNING, /* opened, working */ 173 __I40EVF_RUNNING, /* opened, working */
177}; 174};
178 175
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
185/* board specific private data structure */ 182/* board specific private data structure */
186struct i40evf_adapter { 183struct i40evf_adapter {
187 struct timer_list watchdog_timer; 184 struct timer_list watchdog_timer;
188 struct vlan_group *vlgrp;
189 struct work_struct reset_task; 185 struct work_struct reset_task;
190 struct work_struct adminq_task; 186 struct work_struct adminq_task;
191 struct delayed_work init_task; 187 struct delayed_work init_task;
192 struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 188 struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
193 struct list_head vlan_filter_list; 189 struct list_head vlan_filter_list;
194 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; 190 char misc_vector_name[IFNAMSIZ + 9];
195
196 /* Interrupt Throttle Rate */
197 u32 itr_setting;
198 u16 eitr_low;
199 u16 eitr_high;
200 191
201 /* TX */ 192 /* TX */
202 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; 193 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
203 u64 restart_queue;
204 u64 hw_csum_tx_good;
205 u64 lsc_int;
206 u64 hw_tso_ctxt;
207 u64 hw_tso6_ctxt;
208 u32 tx_timeout_count; 194 u32 tx_timeout_count;
209 struct list_head mac_filter_list; 195 struct list_head mac_filter_list;
210#ifdef DEBUG
211 bool detect_tx_hung;
212#endif /* DEBUG */
213 196
214 /* RX */ 197 /* RX */
215 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; 198 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
216 int txd_count;
217 int rxd_count;
218 u64 hw_csum_rx_error; 199 u64 hw_csum_rx_error;
219 u64 hw_rx_no_dma_resources;
220 u64 hw_csum_rx_good;
221 u64 non_eop_descs;
222 int num_msix_vectors; 200 int num_msix_vectors;
223 struct msix_entry *msix_entries; 201 struct msix_entry *msix_entries;
224 202
225 u64 rx_hdr_split; 203 u32 flags;
226
227 u32 init_state;
228 volatile unsigned long flags;
229#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) 204#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
230#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) 205#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
231#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) 206#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,9 @@ struct i40evf_adapter {
234#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) 209#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
235#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) 210#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
236#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) 211#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
212#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
213#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
214#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
237/* duplcates for common code */ 215/* duplcates for common code */
238#define I40E_FLAG_FDIR_ATR_ENABLED 0 216#define I40E_FLAG_FDIR_ATR_ENABLED 0
239#define I40E_FLAG_DCB_ENABLED 0 217#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +229,19 @@ struct i40evf_adapter {
251#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) 229#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
252#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) 230#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
253#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) 231#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
232
254 /* OS defined structs */ 233 /* OS defined structs */
255 struct net_device *netdev; 234 struct net_device *netdev;
256 struct pci_dev *pdev; 235 struct pci_dev *pdev;
257 struct net_device_stats net_stats; 236 struct net_device_stats net_stats;
258 237
259 /* structs defined in i40e_vf.h */ 238 struct i40e_hw hw; /* defined in i40e_type.h */
260 struct i40e_hw hw;
261 239
262 enum i40evf_state_t state; 240 enum i40evf_state_t state;
263 volatile unsigned long crit_section; 241 volatile unsigned long crit_section;
264 u64 tx_busy;
265 242
266 struct work_struct watchdog_task; 243 struct work_struct watchdog_task;
267 bool netdev_registered; 244 bool netdev_registered;
268 bool dev_closed;
269 bool link_up; 245 bool link_up;
270 enum i40e_virtchnl_ops current_op; 246 enum i40e_virtchnl_ops current_op;
271 struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ 247 struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +252,6 @@ struct i40evf_adapter {
276 u32 aq_wait_count; 252 u32 aq_wait_count;
277}; 253};
278 254
279struct i40evf_info {
280 enum i40e_mac_type mac;
281 unsigned int flags;
282};
283
284 255
285/* needed by i40evf_ethtool.c */ 256/* needed by i40evf_ethtool.c */
286extern char i40evf_driver_name[]; 257extern char i40evf_driver_name[];
@@ -315,6 +286,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
315void i40evf_del_vlans(struct i40evf_adapter *adapter); 286void i40evf_del_vlans(struct i40evf_adapter *adapter);
316void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); 287void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
317void i40evf_request_stats(struct i40evf_adapter *adapter); 288void i40evf_request_stats(struct i40evf_adapter *adapter);
289void i40evf_request_reset(struct i40evf_adapter *adapter);
318void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, 290void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
319 enum i40e_virtchnl_ops v_opcode, 291 enum i40e_virtchnl_ops v_opcode,
320 i40e_status v_retval, u8 *msg, u16 msglen); 292 i40e_status v_retval, u8 *msg, u16 msglen);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
241{ 241{
242 struct i40evf_adapter *adapter = netdev_priv(netdev); 242 struct i40evf_adapter *adapter = netdev_priv(netdev);
243 u32 new_rx_count, new_tx_count; 243 u32 new_rx_count, new_tx_count;
244 int i;
244 245
245 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
246 return -EINVAL; 247 return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
256 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); 257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
257 258
258 /* if nothing to do return success */ 259 /* if nothing to do return success */
259 if ((new_tx_count == adapter->txd_count) && 260 if ((new_tx_count == adapter->tx_rings[0]->count) &&
260 (new_rx_count == adapter->rxd_count)) 261 (new_rx_count == adapter->rx_rings[0]->count))
261 return 0; 262 return 0;
262 263
263 adapter->txd_count = new_tx_count; 264 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
264 adapter->rxd_count = new_rx_count; 265 adapter->tx_rings[0]->count = new_tx_count;
266 adapter->rx_rings[0]->count = new_rx_count;
267 }
265 268
266 if (netif_running(netdev)) 269 if (netif_running(netdev))
267 i40evf_reinit_locked(adapter); 270 i40evf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf4419243..e35e66ffa782 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 31static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 32 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 33
34#define DRV_VERSION "0.9.11" 34#define DRV_VERSION "0.9.16"
35const char i40evf_driver_version[] = DRV_VERSION; 35const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 36static const char i40evf_copyright[] =
37 "Copyright (c) 2013 Intel Corporation."; 37 "Copyright (c) 2013 - 2014 Intel Corporation.";
38 38
39/* i40evf_pci_tbl - PCI Device ID Table 39/* i40evf_pci_tbl - PCI Device ID Table
40 * 40 *
@@ -167,9 +167,11 @@ static void i40evf_tx_timeout(struct net_device *netdev)
167 struct i40evf_adapter *adapter = netdev_priv(netdev); 167 struct i40evf_adapter *adapter = netdev_priv(netdev);
168 168
169 adapter->tx_timeout_count++; 169 adapter->tx_timeout_count++;
170 170 dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
171 /* Do the reset outside of interrupt context */ 171 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
172 schedule_work(&adapter->reset_task); 172 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
173 schedule_work(&adapter->reset_task);
174 }
173} 175}
174 176
175/** 177/**
@@ -211,6 +213,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
211 int i; 213 int i;
212 struct i40e_hw *hw = &adapter->hw; 214 struct i40e_hw *hw = &adapter->hw;
213 215
216 if (!adapter->msix_entries)
217 return;
218
214 for (i = 1; i < adapter->num_msix_vectors; i++) { 219 for (i = 1; i < adapter->num_msix_vectors; i++) {
215 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); 220 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
216 synchronize_irq(adapter->msix_entries[i].vector); 221 synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +516,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
511 struct net_device *netdev = adapter->netdev; 516 struct net_device *netdev = adapter->netdev;
512 int err; 517 int err;
513 518
514 sprintf(adapter->name[0], "i40evf:mbx"); 519 sprintf(adapter->misc_vector_name, "i40evf:mbx");
515 err = request_irq(adapter->msix_entries[0].vector, 520 err = request_irq(adapter->msix_entries[0].vector,
516 &i40evf_msix_aq, 0, adapter->name[0], netdev); 521 &i40evf_msix_aq, 0,
522 adapter->misc_vector_name, netdev);
517 if (err) { 523 if (err) {
518 dev_err(&adapter->pdev->dev, 524 dev_err(&adapter->pdev->dev,
519 "request_irq for msix_aq failed: %d\n", err); 525 "request_irq for %s failed: %d\n",
526 adapter->misc_vector_name, err);
520 free_irq(adapter->msix_entries[0].vector, netdev); 527 free_irq(adapter->msix_entries[0].vector, netdev);
521 } 528 }
522 return err; 529 return err;
@@ -963,16 +970,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
963 struct net_device *netdev = adapter->netdev; 970 struct net_device *netdev = adapter->netdev;
964 struct i40evf_mac_filter *f; 971 struct i40evf_mac_filter *f;
965 972
966 /* remove all MAC filters from the VSI */ 973 /* remove all MAC filters */
967 list_for_each_entry(f, &adapter->mac_filter_list, list) { 974 list_for_each_entry(f, &adapter->mac_filter_list, list) {
968 f->remove = true; 975 f->remove = true;
969 } 976 }
970 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; 977 /* remove all VLAN filters */
971 /* disable receives */ 978 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
972 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 979 f->remove = true;
973 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); 980 }
974 msleep(20); 981 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
975 982 adapter->state != __I40EVF_RESETTING) {
983 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
984 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
985 /* disable receives */
986 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
987 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
988 msleep(20);
989 }
976 netif_tx_disable(netdev); 990 netif_tx_disable(netdev);
977 991
978 netif_tx_stop_all_queues(netdev); 992 netif_tx_stop_all_queues(netdev);
@@ -1124,8 +1138,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1124 * than CPU's. So let's be conservative and only ask for 1138 * than CPU's. So let's be conservative and only ask for
1125 * (roughly) twice the number of vectors as there are CPU's. 1139 * (roughly) twice the number of vectors as there are CPU's.
1126 */ 1140 */
1127 v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1141 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1128 v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1); 1142 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1129 1143
1130 /* A failure in MSI-X entry allocation isn't fatal, but it does 1144 /* A failure in MSI-X entry allocation isn't fatal, but it does
1131 * mean we disable MSI-X capabilities of the adapter. 1145 * mean we disable MSI-X capabilities of the adapter.
@@ -1291,19 +1305,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
1291 watchdog_task); 1305 watchdog_task);
1292 struct i40e_hw *hw = &adapter->hw; 1306 struct i40e_hw *hw = &adapter->hw;
1293 1307
1294 if (adapter->state < __I40EVF_DOWN) 1308 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
1309 goto restart_watchdog;
1310
1311 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1312 dev_info(&adapter->pdev->dev, "Checking for redemption\n");
1313 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
1314 /* A chance for redemption! */
1315 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1316 adapter->state = __I40EVF_STARTUP;
1317 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1318 schedule_delayed_work(&adapter->init_task, 10);
1319 clear_bit(__I40EVF_IN_CRITICAL_TASK,
1320 &adapter->crit_section);
1321 /* Don't reschedule the watchdog, since we've restarted
1322 * the init task. When init_task contacts the PF and
1323 * gets everything set up again, it'll restart the
1324 * watchdog for us. Down, boy. Sit. Stay. Woof.
1325 */
1326 return;
1327 }
1328 adapter->aq_pending = 0;
1329 adapter->aq_required = 0;
1330 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1295 goto watchdog_done; 1331 goto watchdog_done;
1332 }
1296 1333
1297 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1334 if ((adapter->state < __I40EVF_DOWN) ||
1335 (adapter->flags & I40EVF_FLAG_RESET_PENDING))
1298 goto watchdog_done; 1336 goto watchdog_done;
1299 1337
1300 /* check for unannounced reset */ 1338 /* check for reset */
1301 if ((adapter->state != __I40EVF_RESETTING) && 1339 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
1302 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { 1340 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
1303 adapter->state = __I40EVF_RESETTING; 1341 adapter->state = __I40EVF_RESETTING;
1342 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1343 dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
1344 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1304 schedule_work(&adapter->reset_task); 1345 schedule_work(&adapter->reset_task);
1305 dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n", 1346 adapter->aq_pending = 0;
1306 __func__); 1347 adapter->aq_required = 0;
1348 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1307 goto watchdog_done; 1349 goto watchdog_done;
1308 } 1350 }
1309 1351
@@ -1358,16 +1400,25 @@ static void i40evf_watchdog_task(struct work_struct *work)
1358 1400
1359 i40evf_irq_enable(adapter, true); 1401 i40evf_irq_enable(adapter, true);
1360 i40evf_fire_sw_int(adapter, 0xFF); 1402 i40evf_fire_sw_int(adapter, 0xFF);
1403
1361watchdog_done: 1404watchdog_done:
1405 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1406restart_watchdog:
1362 if (adapter->aq_required) 1407 if (adapter->aq_required)
1363 mod_timer(&adapter->watchdog_timer, 1408 mod_timer(&adapter->watchdog_timer,
1364 jiffies + msecs_to_jiffies(20)); 1409 jiffies + msecs_to_jiffies(20));
1365 else 1410 else
1366 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); 1411 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
1367 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1368 schedule_work(&adapter->adminq_task); 1412 schedule_work(&adapter->adminq_task);
1369} 1413}
1370 1414
1415static int next_queue(struct i40evf_adapter *adapter, int j)
1416{
1417 j += 1;
1418
1419 return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
1420}
1421
1371/** 1422/**
1372 * i40evf_configure_rss - Prepare for RSS if used 1423 * i40evf_configure_rss - Prepare for RSS if used
1373 * @adapter: board private structure 1424 * @adapter: board private structure
@@ -1398,19 +1449,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
1398 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); 1449 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
1399 1450
1400 /* Populate the LUT with max no. of queues in round robin fashion */ 1451 /* Populate the LUT with max no. of queues in round robin fashion */
1401 for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) { 1452 j = adapter->vsi_res->num_queue_pairs;
1402 if (j == adapter->vsi_res->num_queue_pairs) 1453 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
1403 j = 0; 1454 lut = next_queue(adapter, j);
1404 /* lut = 4-byte sliding window of 4 lut entries */ 1455 lut |= next_queue(adapter, j) << 8;
1405 lut = (lut << 8) | (j & 1456 lut |= next_queue(adapter, j) << 16;
1406 ((0x1 << 8) - 1)); 1457 lut |= next_queue(adapter, j) << 24;
1407 /* On i = 3, we have 4 entries in lut; write to the register */ 1458 wr32(hw, I40E_VFQF_HLUT(i), lut);
1408 if ((i & 3) == 3)
1409 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
1410 } 1459 }
1411 i40e_flush(hw); 1460 i40e_flush(hw);
1412} 1461}
1413 1462
1463#define I40EVF_RESET_WAIT_MS 100
1464#define I40EVF_RESET_WAIT_COUNT 200
1414/** 1465/**
1415 * i40evf_reset_task - Call-back task to handle hardware reset 1466 * i40evf_reset_task - Call-back task to handle hardware reset
1416 * @work: pointer to work_struct 1467 * @work: pointer to work_struct
@@ -1421,8 +1472,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
1421 **/ 1472 **/
1422static void i40evf_reset_task(struct work_struct *work) 1473static void i40evf_reset_task(struct work_struct *work)
1423{ 1474{
1424 struct i40evf_adapter *adapter = 1475 struct i40evf_adapter *adapter = container_of(work,
1425 container_of(work, struct i40evf_adapter, reset_task); 1476 struct i40evf_adapter,
1477 reset_task);
1426 struct i40e_hw *hw = &adapter->hw; 1478 struct i40e_hw *hw = &adapter->hw;
1427 int i = 0, err; 1479 int i = 0, err;
1428 uint32_t rstat_val; 1480 uint32_t rstat_val;
@@ -1431,21 +1483,61 @@ static void i40evf_reset_task(struct work_struct *work)
1431 &adapter->crit_section)) 1483 &adapter->crit_section))
1432 udelay(500); 1484 udelay(500);
1433 1485
1434 /* wait until the reset is complete */ 1486 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
1435 for (i = 0; i < 20; i++) { 1487 dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
1488 i40evf_request_reset(adapter);
1489 }
1490
1491 /* poll until we see the reset actually happen */
1492 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1436 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1493 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1437 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1494 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1438 if (rstat_val == I40E_VFR_COMPLETED) 1495 if (rstat_val != I40E_VFR_VFACTIVE) {
1496 dev_info(&adapter->pdev->dev, "Reset now occurring\n");
1439 break; 1497 break;
1440 else 1498 } else {
1441 mdelay(100); 1499 msleep(I40EVF_RESET_WAIT_MS);
1500 }
1501 }
1502 if (i == I40EVF_RESET_WAIT_COUNT) {
1503 dev_err(&adapter->pdev->dev, "Reset was not detected\n");
1504 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1505 goto continue_reset; /* act like the reset happened */
1506 }
1507
1508 /* wait until the reset is complete and the PF is responding to us */
1509 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1510 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1511 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1512 if (rstat_val == I40E_VFR_VFACTIVE) {
1513 dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
1514 break;
1515 } else {
1516 msleep(I40EVF_RESET_WAIT_MS);
1517 }
1442 } 1518 }
1443 if (i == 20) { 1519 if (i == I40EVF_RESET_WAIT_COUNT) {
1444 /* reset never finished */ 1520 /* reset never finished */
1445 dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n", 1521 dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
1446 __func__, rstat_val); 1522 rstat_val);
1447 /* carry on anyway */ 1523 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
1524
1525 if (netif_running(adapter->netdev))
1526 i40evf_close(adapter->netdev);
1527
1528 i40evf_free_misc_irq(adapter);
1529 i40evf_reset_interrupt_capability(adapter);
1530 i40evf_free_queues(adapter);
1531 kfree(adapter->vf_res);
1532 i40evf_shutdown_adminq(hw);
1533 adapter->netdev->flags &= ~IFF_UP;
1534 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1535 return; /* Do not attempt to reinit. It's dead, Jim. */
1448 } 1536 }
1537
1538continue_reset:
1539 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1540
1449 i40evf_down(adapter); 1541 i40evf_down(adapter);
1450 adapter->state = __I40EVF_RESETTING; 1542 adapter->state = __I40EVF_RESETTING;
1451 1543
@@ -1505,6 +1597,9 @@ static void i40evf_adminq_task(struct work_struct *work)
1505 i40e_status ret; 1597 i40e_status ret;
1506 u16 pending; 1598 u16 pending;
1507 1599
1600 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
1601 return;
1602
1508 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; 1603 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
1509 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 1604 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
1510 if (!event.msg_buf) { 1605 if (!event.msg_buf) {
@@ -1636,6 +1731,10 @@ static int i40evf_open(struct net_device *netdev)
1636 struct i40evf_adapter *adapter = netdev_priv(netdev); 1731 struct i40evf_adapter *adapter = netdev_priv(netdev);
1637 int err; 1732 int err;
1638 1733
1734 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1735 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
1736 return -EIO;
1737 }
1639 if (adapter->state != __I40EVF_DOWN) 1738 if (adapter->state != __I40EVF_DOWN)
1640 return -EBUSY; 1739 return -EBUSY;
1641 1740
@@ -1690,8 +1789,12 @@ static int i40evf_close(struct net_device *netdev)
1690{ 1789{
1691 struct i40evf_adapter *adapter = netdev_priv(netdev); 1790 struct i40evf_adapter *adapter = netdev_priv(netdev);
1692 1791
1792 if (adapter->state <= __I40EVF_DOWN)
1793 return 0;
1794
1693 /* signal that we are down to the interrupt handler */ 1795 /* signal that we are down to the interrupt handler */
1694 adapter->state = __I40EVF_DOWN; 1796 adapter->state = __I40EVF_DOWN;
1797
1695 set_bit(__I40E_DOWN, &adapter->vsi.state); 1798 set_bit(__I40E_DOWN, &adapter->vsi.state);
1696 1799
1697 i40evf_down(adapter); 1800 i40evf_down(adapter);
@@ -1842,16 +1945,18 @@ static void i40evf_init_task(struct work_struct *work)
1842 switch (adapter->state) { 1945 switch (adapter->state) {
1843 case __I40EVF_STARTUP: 1946 case __I40EVF_STARTUP:
1844 /* driver loaded, probe complete */ 1947 /* driver loaded, probe complete */
1948 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1949 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1845 err = i40e_set_mac_type(hw); 1950 err = i40e_set_mac_type(hw);
1846 if (err) { 1951 if (err) {
1847 dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n", 1952 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
1848 __func__, err); 1953 err);
1849 goto err; 1954 goto err;
1850 } 1955 }
1851 err = i40evf_check_reset_complete(hw); 1956 err = i40evf_check_reset_complete(hw);
1852 if (err) { 1957 if (err) {
1853 dev_info(&pdev->dev, "%s: device is still in reset (%d).\n", 1958 dev_err(&pdev->dev, "Device is still in reset (%d)\n",
1854 __func__, err); 1959 err);
1855 goto err; 1960 goto err;
1856 } 1961 }
1857 hw->aq.num_arq_entries = I40EVF_AQ_LEN; 1962 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1966,13 @@ static void i40evf_init_task(struct work_struct *work)
1861 1966
1862 err = i40evf_init_adminq(hw); 1967 err = i40evf_init_adminq(hw);
1863 if (err) { 1968 if (err) {
1864 dev_info(&pdev->dev, "%s: init_adminq failed: %d\n", 1969 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
1865 __func__, err); 1970 err);
1866 goto err; 1971 goto err;
1867 } 1972 }
1868 err = i40evf_send_api_ver(adapter); 1973 err = i40evf_send_api_ver(adapter);
1869 if (err) { 1974 if (err) {
1870 dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n", 1975 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
1871 __func__, err);
1872 i40evf_shutdown_adminq(hw); 1976 i40evf_shutdown_adminq(hw);
1873 goto err; 1977 goto err;
1874 } 1978 }
@@ -1876,19 +1980,21 @@ static void i40evf_init_task(struct work_struct *work)
1876 goto restart; 1980 goto restart;
1877 break; 1981 break;
1878 case __I40EVF_INIT_VERSION_CHECK: 1982 case __I40EVF_INIT_VERSION_CHECK:
1879 if (!i40evf_asq_done(hw)) 1983 if (!i40evf_asq_done(hw)) {
1984 dev_err(&pdev->dev, "Admin queue command never completed.\n");
1880 goto err; 1985 goto err;
1986 }
1881 1987
1882 /* aq msg sent, awaiting reply */ 1988 /* aq msg sent, awaiting reply */
1883 err = i40evf_verify_api_ver(adapter); 1989 err = i40evf_verify_api_ver(adapter);
1884 if (err) { 1990 if (err) {
1885 dev_err(&pdev->dev, "Unable to verify API version, error %d\n", 1991 dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
1886 err); 1992 err);
1887 goto err; 1993 goto err;
1888 } 1994 }
1889 err = i40evf_send_vf_config_msg(adapter); 1995 err = i40evf_send_vf_config_msg(adapter);
1890 if (err) { 1996 if (err) {
1891 dev_err(&pdev->dev, "Unable send config request, error %d\n", 1997 dev_err(&pdev->dev, "Unable send config request (%d)\n",
1892 err); 1998 err);
1893 goto err; 1999 goto err;
1894 } 2000 }
@@ -1902,18 +2008,15 @@ static void i40evf_init_task(struct work_struct *work)
1902 (I40E_MAX_VF_VSI * 2008 (I40E_MAX_VF_VSI *
1903 sizeof(struct i40e_virtchnl_vsi_resource)); 2009 sizeof(struct i40e_virtchnl_vsi_resource));
1904 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); 2010 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
1905 if (!adapter->vf_res) { 2011 if (!adapter->vf_res)
1906 dev_err(&pdev->dev, "%s: unable to allocate memory\n",
1907 __func__);
1908 goto err; 2012 goto err;
1909 }
1910 } 2013 }
1911 err = i40evf_get_vf_config(adapter); 2014 err = i40evf_get_vf_config(adapter);
1912 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) 2015 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
1913 goto restart; 2016 goto restart;
1914 if (err) { 2017 if (err) {
1915 dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n", 2018 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
1916 __func__, err); 2019 err);
1917 goto err_alloc; 2020 goto err_alloc;
1918 } 2021 }
1919 adapter->state = __I40EVF_INIT_SW; 2022 adapter->state = __I40EVF_INIT_SW;
@@ -1927,25 +2030,23 @@ static void i40evf_init_task(struct work_struct *work)
1927 adapter->vsi_res = &adapter->vf_res->vsi_res[i]; 2030 adapter->vsi_res = &adapter->vf_res->vsi_res[i];
1928 } 2031 }
1929 if (!adapter->vsi_res) { 2032 if (!adapter->vsi_res) {
1930 dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__); 2033 dev_err(&pdev->dev, "No LAN VSI found\n");
1931 goto err_alloc; 2034 goto err_alloc;
1932 } 2035 }
1933 2036
1934 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; 2037 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
1935 2038
1936 adapter->txd_count = I40EVF_DEFAULT_TXD;
1937 adapter->rxd_count = I40EVF_DEFAULT_RXD;
1938
1939 netdev->netdev_ops = &i40evf_netdev_ops; 2039 netdev->netdev_ops = &i40evf_netdev_ops;
1940 i40evf_set_ethtool_ops(netdev); 2040 i40evf_set_ethtool_ops(netdev);
1941 netdev->watchdog_timeo = 5 * HZ; 2041 netdev->watchdog_timeo = 5 * HZ;
1942 2042 netdev->features |= NETIF_F_HIGHDMA |
1943 netdev->features |= NETIF_F_SG | 2043 NETIF_F_SG |
1944 NETIF_F_IP_CSUM | 2044 NETIF_F_IP_CSUM |
1945 NETIF_F_SCTP_CSUM | 2045 NETIF_F_SCTP_CSUM |
1946 NETIF_F_IPV6_CSUM | 2046 NETIF_F_IPV6_CSUM |
1947 NETIF_F_TSO | 2047 NETIF_F_TSO |
1948 NETIF_F_TSO6 | 2048 NETIF_F_TSO6 |
2049 NETIF_F_RXCSUM |
1949 NETIF_F_GRO; 2050 NETIF_F_GRO;
1950 2051
1951 if (adapter->vf_res->vf_offload_flags 2052 if (adapter->vf_res->vf_offload_flags
@@ -1956,11 +2057,13 @@ static void i40evf_init_task(struct work_struct *work)
1956 NETIF_F_HW_VLAN_CTAG_FILTER; 2057 NETIF_F_HW_VLAN_CTAG_FILTER;
1957 } 2058 }
1958 2059
1959 /* The HW MAC address was set and/or determined in sw_init */ 2060 /* copy netdev features into list of user selectable features */
2061 netdev->hw_features |= netdev->features;
2062 netdev->hw_features &= ~NETIF_F_RXCSUM;
2063
1960 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 2064 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1961 dev_info(&pdev->dev, 2065 dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
1962 "Invalid MAC address %pMAC, using random\n", 2066 adapter->hw.mac.addr);
1963 adapter->hw.mac.addr);
1964 random_ether_addr(adapter->hw.mac.addr); 2067 random_ether_addr(adapter->hw.mac.addr);
1965 } 2068 }
1966 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 2069 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2097,6 @@ static void i40evf_init_task(struct work_struct *work)
1994 2097
1995 netif_carrier_off(netdev); 2098 netif_carrier_off(netdev);
1996 2099
1997 strcpy(netdev->name, "eth%d");
1998
1999 adapter->vsi.id = adapter->vsi_res->vsi_id; 2100 adapter->vsi.id = adapter->vsi_res->vsi_id;
2000 adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ 2101 adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
2001 adapter->vsi.back = adapter; 2102 adapter->vsi.back = adapter;
@@ -2005,9 +2106,11 @@ static void i40evf_init_task(struct work_struct *work)
2005 adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC; 2106 adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
2006 adapter->vsi.netdev = adapter->netdev; 2107 adapter->vsi.netdev = adapter->netdev;
2007 2108
2008 err = register_netdev(netdev); 2109 if (!adapter->netdev_registered) {
2009 if (err) 2110 err = register_netdev(netdev);
2010 goto err_register; 2111 if (err)
2112 goto err_register;
2113 }
2011 2114
2012 adapter->netdev_registered = true; 2115 adapter->netdev_registered = true;
2013 2116
@@ -2031,7 +2134,6 @@ err_register:
2031 i40evf_free_misc_irq(adapter); 2134 i40evf_free_misc_irq(adapter);
2032err_sw_init: 2135err_sw_init:
2033 i40evf_reset_interrupt_capability(adapter); 2136 i40evf_reset_interrupt_capability(adapter);
2034 adapter->state = __I40EVF_FAILED;
2035err_alloc: 2137err_alloc:
2036 kfree(adapter->vf_res); 2138 kfree(adapter->vf_res);
2037 adapter->vf_res = NULL; 2139 adapter->vf_res = NULL;
@@ -2039,9 +2141,7 @@ err:
2039 /* Things went into the weeds, so try again later */ 2141 /* Things went into the weeds, so try again later */
2040 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { 2142 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
2041 dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n"); 2143 dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
2042 if (hw->aq.asq.count) 2144 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2043 i40evf_shutdown_adminq(hw); /* ignore error */
2044 adapter->state = __I40EVF_FAILED;
2045 return; /* do not reschedule */ 2145 return; /* do not reschedule */
2046 } 2146 }
2047 schedule_delayed_work(&adapter->init_task, HZ * 3); 2147 schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,26 +2184,20 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2084 struct net_device *netdev; 2184 struct net_device *netdev;
2085 struct i40evf_adapter *adapter = NULL; 2185 struct i40evf_adapter *adapter = NULL;
2086 struct i40e_hw *hw = NULL; 2186 struct i40e_hw *hw = NULL;
2087 int err, pci_using_dac; 2187 int err;
2088 2188
2089 err = pci_enable_device(pdev); 2189 err = pci_enable_device(pdev);
2090 if (err) 2190 if (err)
2091 return err; 2191 return err;
2092 2192
2093 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 2193 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2094 pci_using_dac = true; 2194 if (err) {
2095 /* coherent mask for the same size will always succeed if 2195 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2096 * dma_set_mask does 2196 if (err) {
2097 */ 2197 dev_err(&pdev->dev,
2098 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 2198 "DMA configuration failed: 0x%x\n", err);
2099 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 2199 goto err_dma;
2100 pci_using_dac = false; 2200 }
2101 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
2102 } else {
2103 dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
2104 __func__, err);
2105 err = -EIO;
2106 goto err_dma;
2107 } 2201 }
2108 2202
2109 err = pci_request_regions(pdev, i40evf_driver_name); 2203 err = pci_request_regions(pdev, i40evf_driver_name);
@@ -2128,8 +2222,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2128 2222
2129 pci_set_drvdata(pdev, netdev); 2223 pci_set_drvdata(pdev, netdev);
2130 adapter = netdev_priv(netdev); 2224 adapter = netdev_priv(netdev);
2131 if (pci_using_dac)
2132 netdev->features |= NETIF_F_HIGHDMA;
2133 2225
2134 adapter->netdev = netdev; 2226 adapter->netdev = netdev;
2135 adapter->pdev = pdev; 2227 adapter->pdev = pdev;
@@ -2271,6 +2363,7 @@ static void i40evf_remove(struct pci_dev *pdev)
2271 struct i40e_hw *hw = &adapter->hw; 2363 struct i40e_hw *hw = &adapter->hw;
2272 2364
2273 cancel_delayed_work_sync(&adapter->init_task); 2365 cancel_delayed_work_sync(&adapter->init_task);
2366 cancel_work_sync(&adapter->reset_task);
2274 2367
2275 if (adapter->netdev_registered) { 2368 if (adapter->netdev_registered) {
2276 unregister_netdev(netdev); 2369 unregister_netdev(netdev);
@@ -2278,17 +2371,15 @@ static void i40evf_remove(struct pci_dev *pdev)
2278 } 2371 }
2279 adapter->state = __I40EVF_REMOVE; 2372 adapter->state = __I40EVF_REMOVE;
2280 2373
2281 if (adapter->num_msix_vectors) { 2374 if (adapter->msix_entries) {
2282 i40evf_misc_irq_disable(adapter); 2375 i40evf_misc_irq_disable(adapter);
2283 del_timer_sync(&adapter->watchdog_timer);
2284
2285 flush_scheduled_work();
2286
2287 i40evf_free_misc_irq(adapter); 2376 i40evf_free_misc_irq(adapter);
2288
2289 i40evf_reset_interrupt_capability(adapter); 2377 i40evf_reset_interrupt_capability(adapter);
2290 } 2378 }
2291 2379
2380 del_timer_sync(&adapter->watchdog_timer);
2381 flush_scheduled_work();
2382
2292 if (hw->aq.asq.count) 2383 if (hw->aq.asq.count)
2293 i40evf_shutdown_adminq(hw); 2384 i40evf_shutdown_adminq(hw);
2294 2385
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d79e62b..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
43 struct i40e_hw *hw = &adapter->hw; 43 struct i40e_hw *hw = &adapter->hw;
44 i40e_status err; 44 i40e_status err;
45 45
46 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
47 return 0; /* nothing to see here, move along */
48
46 err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); 49 err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
47 if (err) 50 if (err)
48 dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", 51 dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
651 /* if the request failed, don't lock out others */ 654 /* if the request failed, don't lock out others */
652 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; 655 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
653} 656}
657/**
658 * i40evf_request_reset
659 * @adapter: adapter structure
660 *
661 * Request that the PF reset this VF. No response is expected.
662 **/
663void i40evf_request_reset(struct i40evf_adapter *adapter)
664{
665 /* Don't check CURRENT_OP - this is always higher priority */
666 i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
667 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
668}
654 669
655/** 670/**
656 * i40evf_virtchnl_completion 671 * i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
689 } 704 }
690 break; 705 break;
691 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: 706 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
692 adapter->state = __I40EVF_RESETTING; 707 dev_info(&adapter->pdev->dev, "PF reset warning received\n");
693 schedule_work(&adapter->reset_task); 708 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
694 dev_info(&adapter->pdev->dev, 709 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
695 "%s: hardware reset pending\n", __func__); 710 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
711 schedule_work(&adapter->reset_task);
712 }
696 break; 713 break;
697 default: 714 default:
698 dev_err(&adapter->pdev->dev, 715 dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e285bb..5bcb2de75933 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82575 PCI-Express Ethernet Linux driver 3# Intel 82575 PCI-Express Ethernet Linux driver
4# Copyright(c) 1999 - 2013 Intel Corporation. 4# Copyright(c) 1999 - 2014 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13# more details. 13# more details.
14# 14#
15# You should have received a copy of the GNU General Public License along with 15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc., 16# this program; if not, see <http://www.gnu.org/licenses/>.
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18# 17#
19# The full GNU General Public License is included in this distribution in 18# The full GNU General Public License is included in this distribution in
20# the file called "COPYING". 19# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 06df6928f44c..fa36fe12e775 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -77,8 +76,6 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
77static const u16 e1000_82580_rxpbs_table[] = 76static const u16 e1000_82580_rxpbs_table[] =
78 { 36, 72, 144, 1, 2, 4, 8, 16, 77 { 36, 72, 144, 1, 2, 4, 8, 16,
79 35, 70, 140 }; 78 35, 70, 140 };
80#define E1000_82580_RXPBS_TABLE_SIZE \
81 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
82 79
83/** 80/**
84 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 81 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -2308,7 +2305,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
2308{ 2305{
2309 u16 ret_val = 0; 2306 u16 ret_val = 0;
2310 2307
2311 if (data < E1000_82580_RXPBS_TABLE_SIZE) 2308 if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
2312 ret_val = e1000_82580_rxpbs_table[data]; 2309 ret_val = e1000_82580_rxpbs_table[data];
2313 2310
2314 return ret_val; 2311 return ret_val;
@@ -2714,13 +2711,14 @@ static const u8 e1000_emc_therm_limit[4] = {
2714 E1000_EMC_DIODE3_THERM_LIMIT 2711 E1000_EMC_DIODE3_THERM_LIMIT
2715}; 2712};
2716 2713
2714#ifdef CONFIG_IGB_HWMON
2717/** 2715/**
2718 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data 2716 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2719 * @hw: pointer to hardware structure 2717 * @hw: pointer to hardware structure
2720 * 2718 *
2721 * Updates the temperatures in mac.thermal_sensor_data 2719 * Updates the temperatures in mac.thermal_sensor_data
2722 **/ 2720 **/
2723s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2721static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2724{ 2722{
2725 s32 status = E1000_SUCCESS; 2723 s32 status = E1000_SUCCESS;
2726 u16 ets_offset; 2724 u16 ets_offset;
@@ -2774,7 +2772,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2774 * Sets the thermal sensor thresholds according to the NVM map 2772 * Sets the thermal sensor thresholds according to the NVM map
2775 * and save off the threshold and location values into mac.thermal_sensor_data 2773 * and save off the threshold and location values into mac.thermal_sensor_data
2776 **/ 2774 **/
2777s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2775static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2778{ 2776{
2779 s32 status = E1000_SUCCESS; 2777 s32 status = E1000_SUCCESS;
2780 u16 ets_offset; 2778 u16 ets_offset;
@@ -2836,6 +2834,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2836 return status; 2834 return status;
2837} 2835}
2838 2836
2837#endif
2839static struct e1000_mac_operations e1000_mac_ops_82575 = { 2838static struct e1000_mac_operations e1000_mac_ops_82575 = {
2840 .init_hw = igb_init_hw_82575, 2839 .init_hw = igb_init_hw_82575,
2841 .check_for_link = igb_check_for_link_82575, 2840 .check_for_link = igb_check_for_link_82575,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 8c2437722aad..09d78be72416 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -231,6 +230,10 @@ struct e1000_adv_tx_context_desc {
231#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ 230#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
232#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ 231#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
233 232
233#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
234#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
235#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
236
234#define E1000_VLVF_ARRAY_SIZE 32 237#define E1000_VLVF_ARRAY_SIZE 32
235#define E1000_VLVF_VLANID_MASK 0x00000FFF 238#define E1000_VLVF_VLANID_MASK 0x00000FFF
236#define E1000_VLVF_POOLSEL_SHIFT 12 239#define E1000_VLVF_POOLSEL_SHIFT 12
@@ -266,8 +269,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
266s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); 269s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
267s32 igb_set_eee_i350(struct e1000_hw *); 270s32 igb_set_eee_i350(struct e1000_hw *);
268s32 igb_set_eee_i354(struct e1000_hw *); 271s32 igb_set_eee_i354(struct e1000_hw *);
269s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *); 272s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
270s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
271 273
272#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 274#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
273#define E1000_EMC_INTERNAL_DATA 0x00 275#define E1000_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 0571b973be80..b05bf925ac72 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -44,7 +43,11 @@
44#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 43#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
45 44
46/* Extended Device Control */ 45/* Extended Device Control */
46#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
47#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ 47#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
48#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
49#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
50
48/* Physical Func Reset Done Indication */ 51/* Physical Func Reset Done Indication */
49#define E1000_CTRL_EXT_PFRSTD 0x00004000 52#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 53#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -191,7 +194,8 @@
191/* enable link status from external LINK_0 and LINK_1 pins */ 194/* enable link status from external LINK_0 and LINK_1 pins */
192#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
193#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
194#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 197#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
198#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
195#define E1000_CTRL_RST 0x04000000 /* Global reset */ 199#define E1000_CTRL_RST 0x04000000 /* Global reset */
196#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 200#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
197#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ 201#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -529,8 +533,67 @@
529 533
530#define E1000_TIMINCA_16NS_SHIFT 24 534#define E1000_TIMINCA_16NS_SHIFT 24
531 535
532#define E1000_TSICR_TXTS 0x00000002 536/* Time Sync Interrupt Cause/Mask Register Bits */
533#define E1000_TSIM_TXTS 0x00000002 537
538#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
539#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
540#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
541#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
542#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
543#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
544#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
545#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
546
547#define TSYNC_INTERRUPTS TSINTR_TXTS
548#define E1000_TSICR_TXTS TSINTR_TXTS
549
550/* TSAUXC Configuration Bits */
551#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
552#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
553#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
554#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
555#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
556#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
557#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
558#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
559#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
560#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
561#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */
562#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
563#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
564#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
565
566/* SDP Configuration Bits */
567#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
568#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
569#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
570#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
571#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
572#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
573#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
574#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
575#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
576#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
577#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
578#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
579#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
580#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
581#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
582#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
583#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
584#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
585#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
586#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
587#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
588#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
589#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
590#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
591#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
592#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
593#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
594#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
595#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
596#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
534 597
535#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ 598#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
536#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ 599#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ab99e2b582a8..10741d170f2d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 0c0393316a3a..db963397cc27 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -35,6 +34,8 @@
35#include "e1000_hw.h" 34#include "e1000_hw.h"
36#include "e1000_i210.h" 35#include "e1000_i210.h"
37 36
37static s32 igb_update_flash_i210(struct e1000_hw *hw);
38
38/** 39/**
39 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore 40 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
40 * @hw: pointer to the HW structure 41 * @hw: pointer to the HW structure
@@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
111 * Return successful if access grant bit set, else clear the request for 112 * Return successful if access grant bit set, else clear the request for
112 * EEPROM access and return -E1000_ERR_NVM (-1). 113 * EEPROM access and return -E1000_ERR_NVM (-1).
113 **/ 114 **/
114s32 igb_acquire_nvm_i210(struct e1000_hw *hw) 115static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
115{ 116{
116 return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); 117 return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
117} 118}
@@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
123 * Stop any current commands to the EEPROM and clear the EEPROM request bit, 124 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
124 * then release the semaphores acquired. 125 * then release the semaphores acquired.
125 **/ 126 **/
126void igb_release_nvm_i210(struct e1000_hw *hw) 127static void igb_release_nvm_i210(struct e1000_hw *hw)
127{ 128{
128 igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); 129 igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
129} 130}
@@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
206 * Reads a 16 bit word from the Shadow Ram using the EERD register. 207 * Reads a 16 bit word from the Shadow Ram using the EERD register.
207 * Uses necessary synchronization semaphores. 208 * Uses necessary synchronization semaphores.
208 **/ 209 **/
209s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, 210static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
210 u16 *data) 211 u16 *data)
211{ 212{
212 s32 status = E1000_SUCCESS; 213 s32 status = E1000_SUCCESS;
213 u16 i, count; 214 u16 i, count;
@@ -306,8 +307,8 @@ out:
306 * If error code is returned, data and Shadow RAM may be inconsistent - buffer 307 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
307 * partially written. 308 * partially written.
308 **/ 309 **/
309s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, 310static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
310 u16 *data) 311 u16 *data)
311{ 312{
312 s32 status = E1000_SUCCESS; 313 s32 status = E1000_SUCCESS;
313 u16 i, count; 314 u16 i, count;
@@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
555 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 556 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
556 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 557 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
557 **/ 558 **/
558s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) 559static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
559{ 560{
560 s32 status = E1000_SUCCESS; 561 s32 status = E1000_SUCCESS;
561 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); 562 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
@@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
590 * up to the checksum. Then calculates the EEPROM checksum and writes the 591 * up to the checksum. Then calculates the EEPROM checksum and writes the
591 * value to the EEPROM. Next commit EEPROM data onto the Flash. 592 * value to the EEPROM. Next commit EEPROM data onto the Flash.
592 **/ 593 **/
593s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) 594static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
594{ 595{
595 s32 ret_val = E1000_SUCCESS; 596 s32 ret_val = E1000_SUCCESS;
596 u16 checksum = 0; 597 u16 checksum = 0;
@@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
684 * @hw: pointer to the HW structure 685 * @hw: pointer to the HW structure
685 * 686 *
686 **/ 687 **/
687s32 igb_update_flash_i210(struct e1000_hw *hw) 688static s32 igb_update_flash_i210(struct e1000_hw *hw)
688{ 689{
689 s32 ret_val = E1000_SUCCESS; 690 s32 ret_val = E1000_SUCCESS;
690 u32 flup; 691 u32 flup;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 2d913716573a..907fe99a9813 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -28,17 +27,8 @@
28#ifndef _E1000_I210_H_ 27#ifndef _E1000_I210_H_
29#define _E1000_I210_H_ 28#define _E1000_I210_H_
30 29
31s32 igb_update_flash_i210(struct e1000_hw *hw);
32s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
33s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
34s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
35 u16 *data);
36s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
37 u16 *data);
38s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 30s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
39void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 31void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
40s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
41void igb_release_nvm_i210(struct e1000_hw *hw);
42s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 32s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
43s32 igb_read_invm_version(struct e1000_hw *hw, 33s32 igb_read_invm_version(struct e1000_hw *hw,
44 struct e1000_fw_version *invm_ver); 34 struct e1000_fw_version *invm_ver);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298f0ed50670..5910a932ea7c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e4cbe8ef67b3..99299ba8ee3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447fabf7..d5b121771c31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba41acf3..f52f5515e5a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index a7db7f3db914..9abf82919c65 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 433b7419cb98..5b101170b17e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index ad2b74d95138..4009bbab7407 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -394,77 +393,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
394} 393}
395 394
396/** 395/**
397 * e1000_write_sfp_data_byte - Writes SFP module data.
398 * @hw: pointer to the HW structure
399 * @offset: byte location offset to write to
400 * @data: data to write
401 *
402 * Writes one byte to SFP module data stored
403 * in SFP resided EEPROM memory or SFP diagnostic area.
404 * Function should be called with
405 * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
406 * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
407 * access
408 **/
409s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
410{
411 u32 i = 0;
412 u32 i2ccmd = 0;
413 u32 data_local = 0;
414
415 if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
416 hw_dbg("I2CCMD command address exceeds upper limit\n");
417 return -E1000_ERR_PHY;
418 }
419 /* The programming interface is 16 bits wide
420 * so we need to read the whole word first
421 * then update appropriate byte lane and write
422 * the updated word back.
423 */
424 /* Set up Op-code, EEPROM Address,in the I2CCMD
425 * register. The MAC will take care of interfacing
426 * with an EEPROM to write the data given.
427 */
428 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
429 E1000_I2CCMD_OPCODE_READ);
430 /* Set a command to read single word */
431 wr32(E1000_I2CCMD, i2ccmd);
432 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
433 udelay(50);
434 /* Poll the ready bit to see if lastly
435 * launched I2C operation completed
436 */
437 i2ccmd = rd32(E1000_I2CCMD);
438 if (i2ccmd & E1000_I2CCMD_READY) {
439 /* Check if this is READ or WRITE phase */
440 if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
441 E1000_I2CCMD_OPCODE_READ) {
442 /* Write the selected byte
443 * lane and update whole word
444 */
445 data_local = i2ccmd & 0xFF00;
446 data_local |= data;
447 i2ccmd = ((offset <<
448 E1000_I2CCMD_REG_ADDR_SHIFT) |
449 E1000_I2CCMD_OPCODE_WRITE | data_local);
450 wr32(E1000_I2CCMD, i2ccmd);
451 } else {
452 break;
453 }
454 }
455 }
456 if (!(i2ccmd & E1000_I2CCMD_READY)) {
457 hw_dbg("I2CCMD Write did not complete\n");
458 return -E1000_ERR_PHY;
459 }
460 if (i2ccmd & E1000_I2CCMD_ERROR) {
461 hw_dbg("I2CCMD Error bit set\n");
462 return -E1000_ERR_PHY;
463 }
464 return 0;
465}
466
467/**
468 * igb_read_phy_reg_igp - Read igp PHY register 396 * igb_read_phy_reg_igp - Read igp PHY register
469 * @hw: pointer to the HW structure 397 * @hw: pointer to the HW structure
470 * @offset: register offset to be read 398 * @offset: register offset to be read
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6a0873f2095a..4c2c36c46a73 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -70,7 +69,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); 69s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); 70s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
72s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); 71s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
73s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
74s32 igb_copper_link_setup_82580(struct e1000_hw *hw); 72s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
75s32 igb_get_phy_info_82580(struct e1000_hw *hw); 73s32 igb_get_phy_info_82580(struct e1000_hw *hw);
76s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); 74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6c53af..bdb246e848e1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -41,6 +40,7 @@
41#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 40#define E1000_FCT 0x00030 /* Flow Control Type - RW */
42#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ 41#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
43#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 42#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
43#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
44#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ 44#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
45#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ 45#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
46#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 46#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
@@ -102,6 +102,14 @@
102#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ 102#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
103#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ 103#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
104#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ 104#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
105#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
106#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
107#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
108#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
109#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
110#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
111#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
112#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
105#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ 113#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
106#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ 114#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
107#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ 115#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
@@ -349,16 +357,30 @@
349#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) 357#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
350#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) 358#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
351#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) 359#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
360#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
352#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine 361#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
353 * Filter - RW */ 362 * Filter - RW */
354#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) 363#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
355 364
356#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 365struct e1000_hw;
357#define rd32(reg) (readl(hw->hw_addr + reg)) 366
367u32 igb_rd32(struct e1000_hw *hw, u32 reg);
368
369/* write operations, indexed using DWORDS */
370#define wr32(reg, val) \
371do { \
372 u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
373 if (!E1000_REMOVED(hw_addr)) \
374 writel((val), &hw_addr[(reg)]); \
375} while (0)
376
377#define rd32(reg) (igb_rd32(hw, reg))
378
358#define wrfl() ((void)rd32(E1000_STATUS)) 379#define wrfl() ((void)rd32(E1000_STATUS))
359 380
360#define array_wr32(reg, offset, value) \ 381#define array_wr32(reg, offset, value) \
361 (writel(value, hw->hw_addr + reg + ((offset) << 2))) 382 wr32((reg) + ((offset) << 2), (value))
383
362#define array_rd32(reg, offset) \ 384#define array_rd32(reg, offset) \
363 (readl(hw->hw_addr + reg + ((offset) << 2))) 385 (readl(hw->hw_addr + reg + ((offset) << 2)))
364 386
@@ -397,4 +419,6 @@
397#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) 419#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
398#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ 420#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
399 421
422#define E1000_REMOVED(h) unlikely(!(h))
423
400#endif 424#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ccf472f073dd..7fbe1e925143 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -42,6 +41,7 @@
42#include <linux/i2c.h> 41#include <linux/i2c.h>
43#include <linux/i2c-algo-bit.h> 42#include <linux/i2c-algo-bit.h>
44#include <linux/pci.h> 43#include <linux/pci.h>
44#include <linux/mdio.h>
45 45
46struct igb_adapter; 46struct igb_adapter;
47 47
@@ -434,6 +434,7 @@ struct igb_adapter {
434 struct delayed_work ptp_overflow_work; 434 struct delayed_work ptp_overflow_work;
435 struct work_struct ptp_tx_work; 435 struct work_struct ptp_tx_work;
436 struct sk_buff *ptp_tx_skb; 436 struct sk_buff *ptp_tx_skb;
437 struct hwtstamp_config tstamp_config;
437 unsigned long ptp_tx_start; 438 unsigned long ptp_tx_start;
438 unsigned long last_rx_ptp_check; 439 unsigned long last_rx_ptp_check;
439 spinlock_t tmreg_lock; 440 spinlock_t tmreg_lock;
@@ -456,6 +457,7 @@ struct igb_adapter {
456 unsigned long link_check_timeout; 457 unsigned long link_check_timeout;
457 int copper_tries; 458 int copper_tries;
458 struct e1000_info ei; 459 struct e1000_info ei;
460 u16 eee_advert;
459}; 461};
460 462
461#define IGB_FLAG_HAS_MSI (1 << 0) 463#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -472,6 +474,7 @@ struct igb_adapter {
472#define IGB_FLAG_MAS_CAPABLE (1 << 11) 474#define IGB_FLAG_MAS_CAPABLE (1 << 11)
473#define IGB_FLAG_MAS_ENABLE (1 << 12) 475#define IGB_FLAG_MAS_ENABLE (1 << 12)
474#define IGB_FLAG_HAS_MSIX (1 << 13) 476#define IGB_FLAG_HAS_MSIX (1 << 13)
477#define IGB_FLAG_EEE (1 << 14)
475 478
476/* Media Auto Sense */ 479/* Media Auto Sense */
477#define IGB_MAS_ENABLE_0 0X0001 480#define IGB_MAS_ENABLE_0 0X0001
@@ -489,7 +492,8 @@ struct igb_adapter {
489enum e1000_state_t { 492enum e1000_state_t {
490 __IGB_TESTING, 493 __IGB_TESTING,
491 __IGB_RESETTING, 494 __IGB_RESETTING,
492 __IGB_DOWN 495 __IGB_DOWN,
496 __IGB_PTP_TX_IN_PROGRESS,
493}; 497};
494 498
495enum igb_boards { 499enum igb_boards {
@@ -525,9 +529,7 @@ void igb_set_fw_version(struct igb_adapter *);
525void igb_ptp_init(struct igb_adapter *adapter); 529void igb_ptp_init(struct igb_adapter *adapter);
526void igb_ptp_stop(struct igb_adapter *adapter); 530void igb_ptp_stop(struct igb_adapter *adapter);
527void igb_ptp_reset(struct igb_adapter *adapter); 531void igb_ptp_reset(struct igb_adapter *adapter);
528void igb_ptp_tx_work(struct work_struct *work);
529void igb_ptp_rx_hang(struct igb_adapter *adapter); 532void igb_ptp_rx_hang(struct igb_adapter *adapter);
530void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
531void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 533void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
532void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 534void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
533 struct sk_buff *skb); 535 struct sk_buff *skb);
@@ -545,8 +547,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
545 rx_ring->last_rx_timestamp = jiffies; 547 rx_ring->last_rx_timestamp = jiffies;
546} 548}
547 549
548int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, 550int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
549 int cmd); 551int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
550#ifdef CONFIG_IGB_HWMON 552#ifdef CONFIG_IGB_HWMON
551void igb_sysfs_exit(struct igb_adapter *adapter); 553void igb_sysfs_exit(struct igb_adapter *adapter);
552int igb_sysfs_init(struct igb_adapter *adapter); 554int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1df02378de69..e5570acbeea8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -2274,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2274 2273
2275 ring = adapter->tx_ring[j]; 2274 ring = adapter->tx_ring[j];
2276 do { 2275 do {
2277 start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 2276 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
2278 data[i] = ring->tx_stats.packets; 2277 data[i] = ring->tx_stats.packets;
2279 data[i+1] = ring->tx_stats.bytes; 2278 data[i+1] = ring->tx_stats.bytes;
2280 data[i+2] = ring->tx_stats.restart_queue; 2279 data[i+2] = ring->tx_stats.restart_queue;
2281 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 2280 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
2282 do { 2281 do {
2283 start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); 2282 start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
2284 restart2 = ring->tx_stats.restart_queue2; 2283 restart2 = ring->tx_stats.restart_queue2;
2285 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); 2284 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
2286 data[i+2] += restart2; 2285 data[i+2] += restart2;
2287 2286
2288 i += IGB_TX_QUEUE_STATS_LEN; 2287 i += IGB_TX_QUEUE_STATS_LEN;
@@ -2290,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2290 for (j = 0; j < adapter->num_rx_queues; j++) { 2289 for (j = 0; j < adapter->num_rx_queues; j++) {
2291 ring = adapter->rx_ring[j]; 2290 ring = adapter->rx_ring[j];
2292 do { 2291 do {
2293 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 2292 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
2294 data[i] = ring->rx_stats.packets; 2293 data[i] = ring->rx_stats.packets;
2295 data[i+1] = ring->rx_stats.bytes; 2294 data[i+1] = ring->rx_stats.bytes;
2296 data[i+2] = ring->rx_stats.drops; 2295 data[i+2] = ring->rx_stats.drops;
2297 data[i+3] = ring->rx_stats.csum_err; 2296 data[i+3] = ring->rx_stats.csum_err;
2298 data[i+4] = ring->rx_stats.alloc_failed; 2297 data[i+4] = ring->rx_stats.alloc_failed;
2299 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 2298 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
2300 i += IGB_RX_QUEUE_STATS_LEN; 2299 i += IGB_RX_QUEUE_STATS_LEN;
2301 } 2300 }
2302 spin_unlock(&adapter->stats64_lock); 2301 spin_unlock(&adapter->stats64_lock);
@@ -2354,6 +2353,11 @@ static int igb_get_ts_info(struct net_device *dev,
2354{ 2353{
2355 struct igb_adapter *adapter = netdev_priv(dev); 2354 struct igb_adapter *adapter = netdev_priv(dev);
2356 2355
2356 if (adapter->ptp_clock)
2357 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2358 else
2359 info->phc_index = -1;
2360
2357 switch (adapter->hw.mac.type) { 2361 switch (adapter->hw.mac.type) {
2358 case e1000_82575: 2362 case e1000_82575:
2359 info->so_timestamping = 2363 info->so_timestamping =
@@ -2375,11 +2379,6 @@ static int igb_get_ts_info(struct net_device *dev,
2375 SOF_TIMESTAMPING_RX_HARDWARE | 2379 SOF_TIMESTAMPING_RX_HARDWARE |
2376 SOF_TIMESTAMPING_RAW_HARDWARE; 2380 SOF_TIMESTAMPING_RAW_HARDWARE;
2377 2381
2378 if (adapter->ptp_clock)
2379 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2380 else
2381 info->phc_index = -1;
2382
2383 info->tx_types = 2382 info->tx_types =
2384 (1 << HWTSTAMP_TX_OFF) | 2383 (1 << HWTSTAMP_TX_OFF) |
2385 (1 << HWTSTAMP_TX_ON); 2384 (1 << HWTSTAMP_TX_ON);
@@ -2588,7 +2587,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2588{ 2587{
2589 struct igb_adapter *adapter = netdev_priv(netdev); 2588 struct igb_adapter *adapter = netdev_priv(netdev);
2590 struct e1000_hw *hw = &adapter->hw; 2589 struct e1000_hw *hw = &adapter->hw;
2591 u32 ipcnfg, eeer, ret_val; 2590 u32 ret_val;
2592 u16 phy_data; 2591 u16 phy_data;
2593 2592
2594 if ((hw->mac.type < e1000_i350) || 2593 if ((hw->mac.type < e1000_i350) ||
@@ -2597,16 +2596,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2597 2596
2598 edata->supported = (SUPPORTED_1000baseT_Full | 2597 edata->supported = (SUPPORTED_1000baseT_Full |
2599 SUPPORTED_100baseT_Full); 2598 SUPPORTED_100baseT_Full);
2599 if (!hw->dev_spec._82575.eee_disable)
2600 edata->advertised =
2601 mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
2602
2603 /* The IPCNFG and EEER registers are not supported on I354. */
2604 if (hw->mac.type == e1000_i354) {
2605 igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
2606 } else {
2607 u32 eeer;
2600 2608
2601 ipcnfg = rd32(E1000_IPCNFG); 2609 eeer = rd32(E1000_EEER);
2602 eeer = rd32(E1000_EEER);
2603 2610
2604 /* EEE status on negotiated link */ 2611 /* EEE status on negotiated link */
2605 if (ipcnfg & E1000_IPCNFG_EEE_1G_AN) 2612 if (eeer & E1000_EEER_EEE_NEG)
2606 edata->advertised = ADVERTISED_1000baseT_Full; 2613 edata->eee_active = true;
2607 2614
2608 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN) 2615 if (eeer & E1000_EEER_TX_LPI_EN)
2609 edata->advertised |= ADVERTISED_100baseT_Full; 2616 edata->tx_lpi_enabled = true;
2617 }
2610 2618
2611 /* EEE Link Partner Advertised */ 2619 /* EEE Link Partner Advertised */
2612 switch (hw->mac.type) { 2620 switch (hw->mac.type) {
@@ -2617,8 +2625,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2617 return -ENODATA; 2625 return -ENODATA;
2618 2626
2619 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); 2627 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2620
2621 break; 2628 break;
2629 case e1000_i354:
2622 case e1000_i210: 2630 case e1000_i210:
2623 case e1000_i211: 2631 case e1000_i211:
2624 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, 2632 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
@@ -2634,12 +2642,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2634 break; 2642 break;
2635 } 2643 }
2636 2644
2637 if (eeer & E1000_EEER_EEE_NEG)
2638 edata->eee_active = true;
2639
2640 edata->eee_enabled = !hw->dev_spec._82575.eee_disable; 2645 edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
2641 2646
2642 if (eeer & E1000_EEER_TX_LPI_EN) 2647 if ((hw->mac.type == e1000_i354) &&
2648 (edata->eee_enabled))
2643 edata->tx_lpi_enabled = true; 2649 edata->tx_lpi_enabled = true;
2644 2650
2645 /* Report correct negotiated EEE status for devices that 2651 /* Report correct negotiated EEE status for devices that
@@ -2687,9 +2693,10 @@ static int igb_set_eee(struct net_device *netdev,
2687 return -EINVAL; 2693 return -EINVAL;
2688 } 2694 }
2689 2695
2690 if (eee_curr.advertised != edata->advertised) { 2696 if (edata->advertised &
2697 ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
2691 dev_err(&adapter->pdev->dev, 2698 dev_err(&adapter->pdev->dev,
2692 "Setting EEE Advertisement is not supported\n"); 2699 "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
2693 return -EINVAL; 2700 return -EINVAL;
2694 } 2701 }
2695 2702
@@ -2699,9 +2706,14 @@ static int igb_set_eee(struct net_device *netdev,
2699 return -EINVAL; 2706 return -EINVAL;
2700 } 2707 }
2701 2708
2709 adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
2702 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { 2710 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
2703 hw->dev_spec._82575.eee_disable = !edata->eee_enabled; 2711 hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
2704 igb_set_eee_i350(hw); 2712 adapter->flags |= IGB_FLAG_EEE;
2713 if (hw->mac.type == e1000_i350)
2714 igb_set_eee_i350(hw);
2715 else
2716 igb_set_eee_i354(hw);
2705 2717
2706 /* reset link */ 2718 /* reset link */
2707 if (netif_running(netdev)) 2719 if (netif_running(netdev))
@@ -2779,9 +2791,11 @@ static int igb_get_module_eeprom(struct net_device *netdev,
2779 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 2791 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
2780 for (i = 0; i < last_word - first_word + 1; i++) { 2792 for (i = 0; i < last_word - first_word + 1; i++) {
2781 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); 2793 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
2782 if (status != E1000_SUCCESS) 2794 if (status != E1000_SUCCESS) {
2783 /* Error occurred while reading module */ 2795 /* Error occurred while reading module */
2796 kfree(dataword);
2784 return -EIO; 2797 return -EIO;
2798 }
2785 2799
2786 be16_to_cpus(&dataword[i]); 2800 be16_to_cpus(&dataword[i]);
2787 } 2801 }
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index e0af5bc61613..8333f67acf96 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 46d31a49f5ea..30198185d19a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION;
70static const char igb_driver_string[] = 69static const char igb_driver_string[] =
71 "Intel(R) Gigabit Ethernet Network Driver"; 70 "Intel(R) Gigabit Ethernet Network Driver";
72static const char igb_copyright[] = 71static const char igb_copyright[] =
73 "Copyright (c) 2007-2013 Intel Corporation."; 72 "Copyright (c) 2007-2014 Intel Corporation.";
74 73
75static const struct e1000_info *igb_info_tbl[] = { 74static const struct e1000_info *igb_info_tbl[] = {
76 [board_82575] = &e1000_82575_info, 75 [board_82575] = &e1000_82575_info,
@@ -752,6 +751,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
752 } 751 }
753} 752}
754 753
754u32 igb_rd32(struct e1000_hw *hw, u32 reg)
755{
756 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
757 u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
758 u32 value = 0;
759
760 if (E1000_REMOVED(hw_addr))
761 return ~value;
762
763 value = readl(&hw_addr[reg]);
764
765 /* reads should not return all F's */
766 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
767 struct net_device *netdev = igb->netdev;
768 hw->hw_addr = NULL;
769 netif_device_detach(netdev);
770 netdev_err(netdev, "PCIe link lost, device now detached\n");
771 }
772
773 return value;
774}
775
755/** 776/**
756 * igb_write_ivar - configure ivar for given MSI-X vector 777 * igb_write_ivar - configure ivar for given MSI-X vector
757 * @hw: pointer to the HW structure 778 * @hw: pointer to the HW structure
@@ -1014,6 +1035,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1014{ 1035{
1015 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 1036 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1016 1037
1038 /* Coming from igb_set_interrupt_capability, the vectors are not yet
1039 * allocated. So, q_vector is NULL so we should stop here.
1040 */
1041 if (!q_vector)
1042 return;
1043
1017 if (q_vector->tx.ring) 1044 if (q_vector->tx.ring)
1018 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 1045 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1019 1046
@@ -1111,16 +1138,18 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1111 for (i = 0; i < numvecs; i++) 1138 for (i = 0; i < numvecs; i++)
1112 adapter->msix_entries[i].entry = i; 1139 adapter->msix_entries[i].entry = i;
1113 1140
1114 err = pci_enable_msix(adapter->pdev, 1141 err = pci_enable_msix_range(adapter->pdev,
1115 adapter->msix_entries, 1142 adapter->msix_entries,
1116 numvecs); 1143 numvecs,
1117 if (err == 0) 1144 numvecs);
1145 if (err > 0)
1118 return; 1146 return;
1119 1147
1120 igb_reset_interrupt_capability(adapter); 1148 igb_reset_interrupt_capability(adapter);
1121 1149
1122 /* If we can't do MSI-X, try MSI */ 1150 /* If we can't do MSI-X, try MSI */
1123msi_only: 1151msi_only:
1152 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1124#ifdef CONFIG_PCI_IOV 1153#ifdef CONFIG_PCI_IOV
1125 /* disable SR-IOV for non MSI-X configurations */ 1154 /* disable SR-IOV for non MSI-X configurations */
1126 if (adapter->vf_data) { 1155 if (adapter->vf_data) {
@@ -1726,6 +1755,10 @@ int igb_up(struct igb_adapter *adapter)
1726 hw->mac.get_link_status = 1; 1755 hw->mac.get_link_status = 1;
1727 schedule_work(&adapter->watchdog_task); 1756 schedule_work(&adapter->watchdog_task);
1728 1757
1758 if ((adapter->flags & IGB_FLAG_EEE) &&
1759 (!hw->dev_spec._82575.eee_disable))
1760 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
1761
1729 return 0; 1762 return 0;
1730} 1763}
1731 1764
@@ -1974,6 +2007,21 @@ void igb_reset(struct igb_adapter *adapter)
1974 } 2007 }
1975 } 2008 }
1976#endif 2009#endif
2010 /* Re-establish EEE setting */
2011 if (hw->phy.media_type == e1000_media_type_copper) {
2012 switch (mac->type) {
2013 case e1000_i350:
2014 case e1000_i210:
2015 case e1000_i211:
2016 igb_set_eee_i350(hw);
2017 break;
2018 case e1000_i354:
2019 igb_set_eee_i354(hw);
2020 break;
2021 default:
2022 break;
2023 }
2024 }
1977 if (!netif_running(adapter->netdev)) 2025 if (!netif_running(adapter->netdev))
1978 igb_power_down_link(adapter); 2026 igb_power_down_link(adapter);
1979 2027
@@ -2560,23 +2608,36 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2560 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : 2608 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
2561 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", 2609 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2562 adapter->num_rx_queues, adapter->num_tx_queues); 2610 adapter->num_rx_queues, adapter->num_tx_queues);
2563 switch (hw->mac.type) { 2611 if (hw->phy.media_type == e1000_media_type_copper) {
2564 case e1000_i350: 2612 switch (hw->mac.type) {
2565 case e1000_i210: 2613 case e1000_i350:
2566 case e1000_i211: 2614 case e1000_i210:
2567 igb_set_eee_i350(hw); 2615 case e1000_i211:
2568 break; 2616 /* Enable EEE for internal copper PHY devices */
2569 case e1000_i354: 2617 err = igb_set_eee_i350(hw);
2570 if (hw->phy.media_type == e1000_media_type_copper) { 2618 if ((!err) &&
2619 (!hw->dev_spec._82575.eee_disable)) {
2620 adapter->eee_advert =
2621 MDIO_EEE_100TX | MDIO_EEE_1000T;
2622 adapter->flags |= IGB_FLAG_EEE;
2623 }
2624 break;
2625 case e1000_i354:
2571 if ((rd32(E1000_CTRL_EXT) & 2626 if ((rd32(E1000_CTRL_EXT) &
2572 E1000_CTRL_EXT_LINK_MODE_SGMII)) 2627 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
2573 igb_set_eee_i354(hw); 2628 err = igb_set_eee_i354(hw);
2629 if ((!err) &&
2630 (!hw->dev_spec._82575.eee_disable)) {
2631 adapter->eee_advert =
2632 MDIO_EEE_100TX | MDIO_EEE_1000T;
2633 adapter->flags |= IGB_FLAG_EEE;
2634 }
2635 }
2636 break;
2637 default:
2638 break;
2574 } 2639 }
2575 break;
2576 default:
2577 break;
2578 } 2640 }
2579
2580 pm_runtime_put_noidle(&pdev->dev); 2641 pm_runtime_put_noidle(&pdev->dev);
2581 return 0; 2642 return 0;
2582 2643
@@ -2591,7 +2652,7 @@ err_eeprom:
2591 iounmap(hw->flash_address); 2652 iounmap(hw->flash_address);
2592err_sw_init: 2653err_sw_init:
2593 igb_clear_interrupt_scheme(adapter); 2654 igb_clear_interrupt_scheme(adapter);
2594 iounmap(hw->hw_addr); 2655 pci_iounmap(pdev, hw->hw_addr);
2595err_ioremap: 2656err_ioremap:
2596 free_netdev(netdev); 2657 free_netdev(netdev);
2597err_alloc_etherdev: 2658err_alloc_etherdev:
@@ -2758,7 +2819,7 @@ static void igb_remove(struct pci_dev *pdev)
2758 igb_disable_sriov(pdev); 2819 igb_disable_sriov(pdev);
2759#endif 2820#endif
2760 2821
2761 iounmap(hw->hw_addr); 2822 pci_iounmap(pdev, hw->hw_addr);
2762 if (hw->flash_address) 2823 if (hw->flash_address)
2763 iounmap(hw->flash_address); 2824 iounmap(hw->flash_address);
2764 pci_release_selected_regions(pdev, 2825 pci_release_selected_regions(pdev,
@@ -3510,6 +3571,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3510 3571
3511 vmolr = rd32(E1000_VMOLR(vfn)); 3572 vmolr = rd32(E1000_VMOLR(vfn));
3512 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ 3573 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3574 if (hw->mac.type == e1000_i350) {
3575 u32 dvmolr;
3576
3577 dvmolr = rd32(E1000_DVMOLR(vfn));
3578 dvmolr |= E1000_DVMOLR_STRVLAN;
3579 wr32(E1000_DVMOLR(vfn), dvmolr);
3580 }
3513 if (aupe) 3581 if (aupe)
3514 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 3582 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3515 else 3583 else
@@ -4158,6 +4226,15 @@ static void igb_watchdog_task(struct work_struct *work)
4158 (ctrl & E1000_CTRL_RFCE) ? "RX" : 4226 (ctrl & E1000_CTRL_RFCE) ? "RX" :
4159 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); 4227 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
4160 4228
4229 /* disable EEE if enabled */
4230 if ((adapter->flags & IGB_FLAG_EEE) &&
4231 (adapter->link_duplex == HALF_DUPLEX)) {
4232 dev_info(&adapter->pdev->dev,
4233 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
4234 adapter->hw.dev_spec._82575.eee_disable = true;
4235 adapter->flags &= ~IGB_FLAG_EEE;
4236 }
4237
4161 /* check if SmartSpeed worked */ 4238 /* check if SmartSpeed worked */
4162 igb_check_downshift(hw); 4239 igb_check_downshift(hw);
4163 if (phy->speed_downgraded) 4240 if (phy->speed_downgraded)
@@ -4306,8 +4383,7 @@ enum latency_range {
4306 * were determined based on theoretical maximum wire speed and testing 4383 * were determined based on theoretical maximum wire speed and testing
4307 * data, in order to minimize response time while increasing bulk 4384 * data, in order to minimize response time while increasing bulk
4308 * throughput. 4385 * throughput.
4309 * This functionality is controlled by the InterruptThrottleRate module 4386 * This functionality is controlled by ethtool's coalescing settings.
4310 * parameter (see igb_param.c)
4311 * NOTE: This function is called only when operating in a multiqueue 4387 * NOTE: This function is called only when operating in a multiqueue
4312 * receive environment. 4388 * receive environment.
4313 **/ 4389 **/
@@ -4381,8 +4457,7 @@ clear_counts:
4381 * based on theoretical maximum wire speed and thresholds were set based 4457 * based on theoretical maximum wire speed and thresholds were set based
4382 * on testing data as well as attempting to minimize response time 4458 * on testing data as well as attempting to minimize response time
4383 * while increasing bulk throughput. 4459 * while increasing bulk throughput.
4384 * this functionality is controlled by the InterruptThrottleRate module 4460 * This functionality is controlled by ethtool's coalescing settings.
4385 * parameter (see igb_param.c)
4386 * NOTE: These calculations are only valid when operating in a single- 4461 * NOTE: These calculations are only valid when operating in a single-
4387 * queue environment. 4462 * queue environment.
4388 **/ 4463 **/
@@ -4546,7 +4621,7 @@ static int igb_tso(struct igb_ring *tx_ring,
4546 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4621 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4547 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 4622 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
4548 4623
4549 if (first->protocol == __constant_htons(ETH_P_IP)) { 4624 if (first->protocol == htons(ETH_P_IP)) {
4550 struct iphdr *iph = ip_hdr(skb); 4625 struct iphdr *iph = ip_hdr(skb);
4551 iph->tot_len = 0; 4626 iph->tot_len = 0;
4552 iph->check = 0; 4627 iph->check = 0;
@@ -4602,12 +4677,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4602 } else { 4677 } else {
4603 u8 l4_hdr = 0; 4678 u8 l4_hdr = 0;
4604 switch (first->protocol) { 4679 switch (first->protocol) {
4605 case __constant_htons(ETH_P_IP): 4680 case htons(ETH_P_IP):
4606 vlan_macip_lens |= skb_network_header_len(skb); 4681 vlan_macip_lens |= skb_network_header_len(skb);
4607 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 4682 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4608 l4_hdr = ip_hdr(skb)->protocol; 4683 l4_hdr = ip_hdr(skb)->protocol;
4609 break; 4684 break;
4610 case __constant_htons(ETH_P_IPV6): 4685 case htons(ETH_P_IPV6):
4611 vlan_macip_lens |= skb_network_header_len(skb); 4686 vlan_macip_lens |= skb_network_header_len(skb);
4612 l4_hdr = ipv6_hdr(skb)->nexthdr; 4687 l4_hdr = ipv6_hdr(skb)->nexthdr;
4613 break; 4688 break;
@@ -4905,12 +4980,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4905 first->bytecount = skb->len; 4980 first->bytecount = skb->len;
4906 first->gso_segs = 1; 4981 first->gso_segs = 1;
4907 4982
4908 skb_tx_timestamp(skb);
4909
4910 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 4983 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4911 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); 4984 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4912 4985
4913 if (!(adapter->ptp_tx_skb)) { 4986 if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
4987 &adapter->state)) {
4914 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4988 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4915 tx_flags |= IGB_TX_FLAGS_TSTAMP; 4989 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4916 4990
@@ -4921,6 +4995,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4921 } 4995 }
4922 } 4996 }
4923 4997
4998 skb_tx_timestamp(skb);
4999
4924 if (vlan_tx_tag_present(skb)) { 5000 if (vlan_tx_tag_present(skb)) {
4925 tx_flags |= IGB_TX_FLAGS_VLAN; 5001 tx_flags |= IGB_TX_FLAGS_VLAN;
4926 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 5002 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -5127,10 +5203,10 @@ void igb_update_stats(struct igb_adapter *adapter,
5127 } 5203 }
5128 5204
5129 do { 5205 do {
5130 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 5206 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
5131 _bytes = ring->rx_stats.bytes; 5207 _bytes = ring->rx_stats.bytes;
5132 _packets = ring->rx_stats.packets; 5208 _packets = ring->rx_stats.packets;
5133 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 5209 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
5134 bytes += _bytes; 5210 bytes += _bytes;
5135 packets += _packets; 5211 packets += _packets;
5136 } 5212 }
@@ -5143,10 +5219,10 @@ void igb_update_stats(struct igb_adapter *adapter,
5143 for (i = 0; i < adapter->num_tx_queues; i++) { 5219 for (i = 0; i < adapter->num_tx_queues; i++) {
5144 struct igb_ring *ring = adapter->tx_ring[i]; 5220 struct igb_ring *ring = adapter->tx_ring[i];
5145 do { 5221 do {
5146 start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 5222 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
5147 _bytes = ring->tx_stats.bytes; 5223 _bytes = ring->tx_stats.bytes;
5148 _packets = ring->tx_stats.packets; 5224 _packets = ring->tx_stats.packets;
5149 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 5225 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
5150 bytes += _bytes; 5226 bytes += _bytes;
5151 packets += _packets; 5227 packets += _packets;
5152 } 5228 }
@@ -6620,7 +6696,9 @@ static inline void igb_rx_hash(struct igb_ring *ring,
6620 struct sk_buff *skb) 6696 struct sk_buff *skb)
6621{ 6697{
6622 if (ring->netdev->features & NETIF_F_RXHASH) 6698 if (ring->netdev->features & NETIF_F_RXHASH)
6623 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 6699 skb_set_hash(skb,
6700 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
6701 PKT_HASH_TYPE_L3);
6624} 6702}
6625 6703
6626/** 6704/**
@@ -6690,7 +6768,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6690 hdr.network += ETH_HLEN; 6768 hdr.network += ETH_HLEN;
6691 6769
6692 /* handle any vlan tag if present */ 6770 /* handle any vlan tag if present */
6693 if (protocol == __constant_htons(ETH_P_8021Q)) { 6771 if (protocol == htons(ETH_P_8021Q)) {
6694 if ((hdr.network - data) > (max_len - VLAN_HLEN)) 6772 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6695 return max_len; 6773 return max_len;
6696 6774
@@ -6699,7 +6777,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6699 } 6777 }
6700 6778
6701 /* handle L3 protocols */ 6779 /* handle L3 protocols */
6702 if (protocol == __constant_htons(ETH_P_IP)) { 6780 if (protocol == htons(ETH_P_IP)) {
6703 if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) 6781 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6704 return max_len; 6782 return max_len;
6705 6783
@@ -6713,7 +6791,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6713 /* record next protocol if header is present */ 6791 /* record next protocol if header is present */
6714 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) 6792 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
6715 nexthdr = hdr.ipv4->protocol; 6793 nexthdr = hdr.ipv4->protocol;
6716 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 6794 } else if (protocol == htons(ETH_P_IPV6)) {
6717 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) 6795 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6718 return max_len; 6796 return max_len;
6719 6797
@@ -6903,7 +6981,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6903 unsigned int total_bytes = 0, total_packets = 0; 6981 unsigned int total_bytes = 0, total_packets = 0;
6904 u16 cleaned_count = igb_desc_unused(rx_ring); 6982 u16 cleaned_count = igb_desc_unused(rx_ring);
6905 6983
6906 do { 6984 while (likely(total_packets < budget)) {
6907 union e1000_adv_rx_desc *rx_desc; 6985 union e1000_adv_rx_desc *rx_desc;
6908 6986
6909 /* return some buffers to hardware, one at a time is too slow */ 6987 /* return some buffers to hardware, one at a time is too slow */
@@ -6955,7 +7033,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6955 7033
6956 /* update budget accounting */ 7034 /* update budget accounting */
6957 total_packets++; 7035 total_packets++;
6958 } while (likely(total_packets < budget)); 7036 }
6959 7037
6960 /* place incomplete frames back on ring for completion */ 7038 /* place incomplete frames back on ring for completion */
6961 rx_ring->skb = skb; 7039 rx_ring->skb = skb;
@@ -7114,8 +7192,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7114 case SIOCGMIIREG: 7192 case SIOCGMIIREG:
7115 case SIOCSMIIREG: 7193 case SIOCSMIIREG:
7116 return igb_mii_ioctl(netdev, ifr, cmd); 7194 return igb_mii_ioctl(netdev, ifr, cmd);
7195 case SIOCGHWTSTAMP:
7196 return igb_ptp_get_ts_config(netdev, ifr);
7117 case SIOCSHWTSTAMP: 7197 case SIOCSHWTSTAMP:
7118 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); 7198 return igb_ptp_set_ts_config(netdev, ifr);
7119 default: 7199 default:
7120 return -EOPNOTSUPP; 7200 return -EOPNOTSUPP;
7121 } 7201 }
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5a54e3dc535d..2cca8fd5e574 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -12,9 +12,8 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along 15 * You should have received a copy of the GNU General Public License along with
16 * with this program; if not, write to the Free Software Foundation, Inc., 16 * this program; if not, see <http://www.gnu.org/licenses/>.
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 17 */
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/device.h> 19#include <linux/device.h>
@@ -75,6 +74,8 @@
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 74#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40 75#define IGB_NBITS_82580 40
77 76
77static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
78
78/* SYSTIM read access for the 82576 */ 79/* SYSTIM read access for the 82576 */
79static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) 80static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
80{ 81{
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
372 * This work function polls the TSYNCTXCTL valid bit to determine when a 373 * This work function polls the TSYNCTXCTL valid bit to determine when a
373 * timestamp has been taken for the current stored skb. 374 * timestamp has been taken for the current stored skb.
374 **/ 375 **/
375void igb_ptp_tx_work(struct work_struct *work) 376static void igb_ptp_tx_work(struct work_struct *work)
376{ 377{
377 struct igb_adapter *adapter = container_of(work, struct igb_adapter, 378 struct igb_adapter *adapter = container_of(work, struct igb_adapter,
378 ptp_tx_work); 379 ptp_tx_work);
@@ -386,6 +387,7 @@ void igb_ptp_tx_work(struct work_struct *work)
386 IGB_PTP_TX_TIMEOUT)) { 387 IGB_PTP_TX_TIMEOUT)) {
387 dev_kfree_skb_any(adapter->ptp_tx_skb); 388 dev_kfree_skb_any(adapter->ptp_tx_skb);
388 adapter->ptp_tx_skb = NULL; 389 adapter->ptp_tx_skb = NULL;
390 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
389 adapter->tx_hwtstamp_timeouts++; 391 adapter->tx_hwtstamp_timeouts++;
390 dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); 392 dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
391 return; 393 return;
@@ -466,7 +468,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
466 * available, then it must have been for this skb here because we only 468 * available, then it must have been for this skb here because we only
467 * allow only one such packet into the queue. 469 * allow only one such packet into the queue.
468 **/ 470 **/
469void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) 471static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
470{ 472{
471 struct e1000_hw *hw = &adapter->hw; 473 struct e1000_hw *hw = &adapter->hw;
472 struct skb_shared_hwtstamps shhwtstamps; 474 struct skb_shared_hwtstamps shhwtstamps;
@@ -479,6 +481,7 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
479 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); 481 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
480 dev_kfree_skb_any(adapter->ptp_tx_skb); 482 dev_kfree_skb_any(adapter->ptp_tx_skb);
481 adapter->ptp_tx_skb = NULL; 483 adapter->ptp_tx_skb = NULL;
484 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
482} 485}
483 486
484/** 487/**
@@ -540,10 +543,26 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
540} 543}
541 544
542/** 545/**
543 * igb_ptp_hwtstamp_ioctl - control hardware time stamping 546 * igb_ptp_get_ts_config - get hardware time stamping config
547 * @netdev:
548 * @ifreq:
549 *
550 * Get the hwtstamp_config settings to return to the user. Rather than attempt
551 * to deconstruct the settings from the registers, just return a shadow copy
552 * of the last known settings.
553 **/
554int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
555{
556 struct igb_adapter *adapter = netdev_priv(netdev);
557 struct hwtstamp_config *config = &adapter->tstamp_config;
558
559 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
560 -EFAULT : 0;
561}
562/**
563 * igb_ptp_set_ts_config - control hardware time stamping
544 * @netdev: 564 * @netdev:
545 * @ifreq: 565 * @ifreq:
546 * @cmd:
547 * 566 *
548 * Outgoing time stamping can be enabled and disabled. Play nice and 567 * Outgoing time stamping can be enabled and disabled. Play nice and
549 * disable it when requested, although it shouldn't case any overhead 568 * disable it when requested, although it shouldn't case any overhead
@@ -557,12 +576,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
557 * not supported, with the exception of "all V2 events regardless of 576 * not supported, with the exception of "all V2 events regardless of
558 * level 2 or 4". 577 * level 2 or 4".
559 **/ 578 **/
560int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 579int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
561 struct ifreq *ifr, int cmd)
562{ 580{
563 struct igb_adapter *adapter = netdev_priv(netdev); 581 struct igb_adapter *adapter = netdev_priv(netdev);
564 struct e1000_hw *hw = &adapter->hw; 582 struct e1000_hw *hw = &adapter->hw;
565 struct hwtstamp_config config; 583 struct hwtstamp_config *config = &adapter->tstamp_config;
566 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; 584 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
567 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 585 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
568 u32 tsync_rx_cfg = 0; 586 u32 tsync_rx_cfg = 0;
@@ -570,14 +588,14 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
570 bool is_l2 = false; 588 bool is_l2 = false;
571 u32 regval; 589 u32 regval;
572 590
573 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 591 if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
574 return -EFAULT; 592 return -EFAULT;
575 593
576 /* reserved for future extensions */ 594 /* reserved for future extensions */
577 if (config.flags) 595 if (config->flags)
578 return -EINVAL; 596 return -EINVAL;
579 597
580 switch (config.tx_type) { 598 switch (config->tx_type) {
581 case HWTSTAMP_TX_OFF: 599 case HWTSTAMP_TX_OFF:
582 tsync_tx_ctl = 0; 600 tsync_tx_ctl = 0;
583 case HWTSTAMP_TX_ON: 601 case HWTSTAMP_TX_ON:
@@ -586,7 +604,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
586 return -ERANGE; 604 return -ERANGE;
587 } 605 }
588 606
589 switch (config.rx_filter) { 607 switch (config->rx_filter) {
590 case HWTSTAMP_FILTER_NONE: 608 case HWTSTAMP_FILTER_NONE:
591 tsync_rx_ctl = 0; 609 tsync_rx_ctl = 0;
592 break; 610 break;
@@ -610,7 +628,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
610 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 628 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
611 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 629 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
612 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; 630 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
613 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 631 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
614 is_l2 = true; 632 is_l2 = true;
615 is_l4 = true; 633 is_l4 = true;
616 break; 634 break;
@@ -621,12 +639,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
621 */ 639 */
622 if (hw->mac.type != e1000_82576) { 640 if (hw->mac.type != e1000_82576) {
623 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 641 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
624 config.rx_filter = HWTSTAMP_FILTER_ALL; 642 config->rx_filter = HWTSTAMP_FILTER_ALL;
625 break; 643 break;
626 } 644 }
627 /* fall through */ 645 /* fall through */
628 default: 646 default:
629 config.rx_filter = HWTSTAMP_FILTER_NONE; 647 config->rx_filter = HWTSTAMP_FILTER_NONE;
630 return -ERANGE; 648 return -ERANGE;
631 } 649 }
632 650
@@ -643,7 +661,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
643 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { 661 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
644 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 662 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
645 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 663 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
646 config.rx_filter = HWTSTAMP_FILTER_ALL; 664 config->rx_filter = HWTSTAMP_FILTER_ALL;
647 is_l2 = true; 665 is_l2 = true;
648 is_l4 = true; 666 is_l4 = true;
649 667
@@ -707,7 +725,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
707 regval = rd32(E1000_RXSTMPL); 725 regval = rd32(E1000_RXSTMPL);
708 regval = rd32(E1000_RXSTMPH); 726 regval = rd32(E1000_RXSTMPH);
709 727
710 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 728 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
711 -EFAULT : 0; 729 -EFAULT : 0;
712} 730}
713 731
@@ -798,7 +816,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
798 816
799 /* Initialize the time sync interrupts for devices that support it. */ 817 /* Initialize the time sync interrupts for devices that support it. */
800 if (hw->mac.type >= e1000_82580) { 818 if (hw->mac.type >= e1000_82580) {
801 wr32(E1000_TSIM, E1000_TSIM_TXTS); 819 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
802 wr32(E1000_IMS, E1000_IMS_TS); 820 wr32(E1000_IMS, E1000_IMS_TS);
803 } 821 }
804 822
@@ -841,6 +859,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
841 if (adapter->ptp_tx_skb) { 859 if (adapter->ptp_tx_skb) {
842 dev_kfree_skb_any(adapter->ptp_tx_skb); 860 dev_kfree_skb_any(adapter->ptp_tx_skb);
843 adapter->ptp_tx_skb = NULL; 861 adapter->ptp_tx_skb = NULL;
862 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
844 } 863 }
845 864
846 if (adapter->ptp_clock) { 865 if (adapter->ptp_clock) {
@@ -864,6 +883,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
864 if (!(adapter->flags & IGB_FLAG_PTP)) 883 if (!(adapter->flags & IGB_FLAG_PTP))
865 return; 884 return;
866 885
886 /* reset the tstamp_config */
887 memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
888
867 switch (adapter->hw.mac.type) { 889 switch (adapter->hw.mac.type) {
868 case e1000_82576: 890 case e1000_82576:
869 /* Dial the nominal frequency. */ 891 /* Dial the nominal frequency. */
@@ -876,7 +898,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
876 case e1000_i211: 898 case e1000_i211:
877 /* Enable the timer functions and interrupts. */ 899 /* Enable the timer functions and interrupts. */
878 wr32(E1000_TSAUXC, 0x0); 900 wr32(E1000_TSAUXC, 0x0);
879 wr32(E1000_TSIM, E1000_TSIM_TXTS); 901 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
880 wr32(E1000_IMS, E1000_IMS_TS); 902 wr32(E1000_IMS, E1000_IMS_TS);
881 break; 903 break;
882 default: 904 default:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 675435fc2e53..b7ab03a2f28f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1043 for (i = 0; i < 3; i++) 1043 for (i = 0; i < 3; i++)
1044 adapter->msix_entries[i].entry = i; 1044 adapter->msix_entries[i].entry = i;
1045 1045
1046 err = pci_enable_msix(adapter->pdev, 1046 err = pci_enable_msix_range(adapter->pdev,
1047 adapter->msix_entries, 3); 1047 adapter->msix_entries, 3, 3);
1048 } 1048 }
1049 1049
1050 if (err) { 1050 if (err < 0) {
1051 /* MSI-X failed */ 1051 /* MSI-X failed */
1052 dev_err(&adapter->pdev->dev, 1052 dev_err(&adapter->pdev->dev,
1053 "Failed to initialize MSI-X interrupts.\n"); 1053 "Failed to initialize MSI-X interrupts.\n");
@@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2014 2014
2015 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2016 switch (skb->protocol) { 2016 switch (skb->protocol) {
2017 case __constant_htons(ETH_P_IP): 2017 case htons(ETH_P_IP):
2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2020 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 2020 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2021 break; 2021 break;
2022 case __constant_htons(ETH_P_IPV6): 2022 case htons(ETH_P_IPV6):
2023 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2023 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2024 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 2024 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2025 break; 2025 break;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 57e390cbe6d0..f42c201f727f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1521 int tso; 1521 int tso;
1522 1522
1523 if (test_bit(__IXGB_DOWN, &adapter->flags)) { 1523 if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1524 dev_kfree_skb(skb); 1524 dev_kfree_skb_any(skb);
1525 return NETDEV_TX_OK; 1525 return NETDEV_TX_OK;
1526 } 1526 }
1527 1527
1528 if (skb->len <= 0) { 1528 if (skb->len <= 0) {
1529 dev_kfree_skb(skb); 1529 dev_kfree_skb_any(skb);
1530 return NETDEV_TX_OK; 1530 return NETDEV_TX_OK;
1531 } 1531 }
1532 1532
@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1543 1543
1544 tso = ixgb_tso(adapter, skb); 1544 tso = ixgb_tso(adapter, skb);
1545 if (tso < 0) { 1545 if (tso < 0) {
1546 dev_kfree_skb(skb); 1546 dev_kfree_skb_any(skb);
1547 return NETDEV_TX_OK; 1547 return NETDEV_TX_OK;
1548 } 1548 }
1549 1549
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0186ea2969fe..55c53a1cbb62 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -765,6 +766,7 @@ struct ixgbe_adapter {
765 struct ptp_clock_info ptp_caps; 766 struct ptp_clock_info ptp_caps;
766 struct work_struct ptp_tx_work; 767 struct work_struct ptp_tx_work;
767 struct sk_buff *ptp_tx_skb; 768 struct sk_buff *ptp_tx_skb;
769 struct hwtstamp_config tstamp_config;
768 unsigned long ptp_tx_start; 770 unsigned long ptp_tx_start;
769 unsigned long last_overflow_check; 771 unsigned long last_overflow_check;
770 unsigned long last_rx_ptp_check; 772 unsigned long last_rx_ptp_check;
@@ -806,10 +808,12 @@ enum ixgbe_state_t {
806 __IXGBE_TESTING, 808 __IXGBE_TESTING,
807 __IXGBE_RESETTING, 809 __IXGBE_RESETTING,
808 __IXGBE_DOWN, 810 __IXGBE_DOWN,
811 __IXGBE_DISABLED,
809 __IXGBE_REMOVING, 812 __IXGBE_REMOVING,
810 __IXGBE_SERVICE_SCHED, 813 __IXGBE_SERVICE_SCHED,
811 __IXGBE_IN_SFP_INIT, 814 __IXGBE_IN_SFP_INIT,
812 __IXGBE_PTP_RUNNING, 815 __IXGBE_PTP_RUNNING,
816 __IXGBE_PTP_TX_IN_PROGRESS,
813}; 817};
814 818
815struct ixgbe_cb { 819struct ixgbe_cb {
@@ -884,7 +888,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
884 u16 soft_id); 888 u16 soft_id);
885void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 889void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
886 union ixgbe_atr_input *mask); 890 union ixgbe_atr_input *mask);
887bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
888void ixgbe_set_rx_mode(struct net_device *netdev); 891void ixgbe_set_rx_mode(struct net_device *netdev);
889#ifdef CONFIG_IXGBE_DCB 892#ifdef CONFIG_IXGBE_DCB
890void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); 893void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -958,8 +961,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
958 rx_ring->last_rx_timestamp = jiffies; 961 rx_ring->last_rx_timestamp = jiffies;
959} 962}
960 963
961int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, 964int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
962 int cmd); 965int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
963void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 966void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
964void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); 967void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
965void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); 968void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index a26f3fee4f35..4c78ea8946c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
57 **/ 58 **/
58static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) 59static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
59{ 60{
60 struct ixgbe_adapter *adapter = hw->back;
61 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); 61 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
62 u16 pcie_devctl2; 62 u16 pcie_devctl2;
63 63
64 if (ixgbe_removed(hw->hw_addr))
65 return;
66
64 /* only take action if timeout value is defaulted to 0 */ 67 /* only take action if timeout value is defaulted to 0 */
65 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) 68 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
66 goto out; 69 goto out;
@@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
79 * directly in order to set the completion timeout value for 82 * directly in order to set the completion timeout value for
80 * 16ms to 55ms 83 * 16ms to 55ms
81 */ 84 */
82 pci_read_config_word(adapter->pdev, 85 pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
83 IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
84 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; 86 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
85 pci_write_config_word(adapter->pdev, 87 ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
86 IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
87out: 88out:
88 /* disable completion timeout resend */ 89 /* disable completion timeout resend */
89 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; 90 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
100 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; 101 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
101 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; 102 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
102 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
104 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
103 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 105 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
104 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 106 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
105 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 107 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
201 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 203 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
202 } 204 }
203 205
204 hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
205
206 /* set the completion timeout for interface */ 206 /* set the completion timeout for interface */
207 if (ret_val == 0) 207 if (ret_val == 0)
208 ixgbe_set_pcie_completion_timeout(hw); 208 ixgbe_set_pcie_completion_timeout(hw);
@@ -1237,14 +1237,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1237} 1237}
1238 1238
1239/** 1239/**
1240 * ixgbe_set_rxpba_82598 - Configure packet buffers 1240 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1241 * @hw: pointer to hardware structure 1241 * @hw: pointer to hardware structure
1242 * @dcb_config: pointer to ixgbe_dcb_config structure 1242 * @num_pb: number of packet buffers to allocate
1243 * 1243 * @headroom: reserve n KB of headroom
1244 * Configure packet buffers. 1244 * @strategy: packet buffer allocation strategy
1245 */ 1245 **/
1246static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, 1246static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1247 int strategy) 1247 u32 headroom, int strategy)
1248{ 1248{
1249 u32 rxpktsize = IXGBE_RXPBSIZE_64KB; 1249 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1250 u8 i = 0; 1250 u8 i = 0;
@@ -1315,7 +1315,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1315 .release_swfw_sync = &ixgbe_release_swfw_sync, 1315 .release_swfw_sync = &ixgbe_release_swfw_sync,
1316 .get_thermal_sensor_data = NULL, 1316 .get_thermal_sensor_data = NULL,
1317 .init_thermal_sensor_thresh = NULL, 1317 .init_thermal_sensor_thresh = NULL,
1318 .mng_fw_enabled = NULL, 1318 .prot_autoc_read = &prot_autoc_read_generic,
1319 .prot_autoc_write = &prot_autoc_write_generic,
1319}; 1320};
1320 1321
1321static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1322static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index edda6814108c..f32b3dd1ba8e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -63,8 +64,10 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
63 u8 dev_addr, u8 *data); 64 u8 dev_addr, u8 *data);
64static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 65static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
65 u8 dev_addr, u8 data); 66 u8 dev_addr, u8 data);
67static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
68static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
66 69
67static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 70bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
68{ 71{
69 u32 fwsm, manc, factps; 72 u32 fwsm, manc, factps;
70 73
@@ -91,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
91 * and MNG not enabled 94 * and MNG not enabled
92 */ 95 */
93 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && 96 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
94 !hw->mng_fw_enabled) { 97 !ixgbe_mng_enabled(hw)) {
95 mac->ops.disable_tx_laser = 98 mac->ops.disable_tx_laser =
96 &ixgbe_disable_tx_laser_multispeed_fiber; 99 &ixgbe_disable_tx_laser_multispeed_fiber;
97 mac->ops.enable_tx_laser = 100 mac->ops.enable_tx_laser =
@@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
122{ 125{
123 s32 ret_val = 0; 126 s32 ret_val = 0;
124 u16 list_offset, data_offset, data_value; 127 u16 list_offset, data_offset, data_value;
125 bool got_lock = false;
126 128
127 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 129 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
128 ixgbe_init_mac_link_ops_82599(hw); 130 ixgbe_init_mac_link_ops_82599(hw);
@@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
160 usleep_range(hw->eeprom.semaphore_delay * 1000, 162 usleep_range(hw->eeprom.semaphore_delay * 1000,
161 hw->eeprom.semaphore_delay * 2000); 163 hw->eeprom.semaphore_delay * 2000);
162 164
163 /* Need SW/FW semaphore around AUTOC writes if LESM on,
164 * likewise reset_pipeline requires lock as it also writes
165 * AUTOC.
166 */
167 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
168 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
169 IXGBE_GSSR_MAC_CSR_SM);
170 if (ret_val)
171 goto setup_sfp_out;
172
173 got_lock = true;
174 }
175
176 /* Restart DSP and set SFI mode */ 165 /* Restart DSP and set SFI mode */
177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) | 166 ret_val = hw->mac.ops.prot_autoc_write(hw,
178 IXGBE_AUTOC_LMS_10G_SERIAL)); 167 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
179 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 168 false);
180 ret_val = ixgbe_reset_pipeline_82599(hw);
181
182 if (got_lock) {
183 hw->mac.ops.release_swfw_sync(hw,
184 IXGBE_GSSR_MAC_CSR_SM);
185 got_lock = false;
186 }
187 169
188 if (ret_val) { 170 if (ret_val) {
189 hw_dbg(hw, " sfp module setup not complete\n"); 171 hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +189,81 @@ setup_sfp_err:
207 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 189 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
208} 190}
209 191
192/**
193 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
194 * @hw: pointer to hardware structure
195 * @locked: Return the if we locked for this read.
196 * @reg_val: Value we read from AUTOC
197 *
198 * For this part (82599) we need to wrap read-modify-writes with a possible
199 * FW/SW lock. It is assumed this lock will be freed with the next
200 * prot_autoc_write_82599(). Note, that locked can only be true in cases
201 * where this function doesn't return an error.
202 **/
203static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
204 u32 *reg_val)
205{
206 s32 ret_val;
207
208 *locked = false;
209 /* If LESM is on then we need to hold the SW/FW semaphore. */
210 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
211 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
212 IXGBE_GSSR_MAC_CSR_SM);
213 if (ret_val)
214 return IXGBE_ERR_SWFW_SYNC;
215
216 *locked = true;
217 }
218
219 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
220 return 0;
221}
222
223/**
224 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
225 * @hw: pointer to hardware structure
226 * @reg_val: value to write to AUTOC
227 * @locked: bool to indicate whether the SW/FW lock was already taken by
228 * previous proc_autoc_read_82599.
229 *
230 * This part (82599) may need to hold a the SW/FW lock around all writes to
231 * AUTOC. Likewise after a write we need to do a pipeline reset.
232 **/
233static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
234{
235 s32 ret_val = 0;
236
237 /* Blocked by MNG FW so bail */
238 if (ixgbe_check_reset_blocked(hw))
239 goto out;
240
241 /* We only need to get the lock if:
242 * - We didn't do it already (in the read part of a read-modify-write)
243 * - LESM is enabled.
244 */
245 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
246 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
247 IXGBE_GSSR_MAC_CSR_SM);
248 if (ret_val)
249 return IXGBE_ERR_SWFW_SYNC;
250
251 locked = true;
252 }
253
254 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
255 ret_val = ixgbe_reset_pipeline_82599(hw);
256
257out:
258 /* Free the SW/FW semaphore as we either grabbed it here or
259 * already had it when this function was called.
260 */
261 if (locked)
262 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
263
264 return ret_val;
265}
266
210static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) 267static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
211{ 268{
212 struct ixgbe_mac_info *mac = &hw->mac; 269 struct ixgbe_mac_info *mac = &hw->mac;
@@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
216 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; 273 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
217 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; 274 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
218 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; 275 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
276 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
219 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; 277 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
220 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; 278 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
221 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 279 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -456,12 +514,20 @@ out:
456 * 514 *
457 * Disables link, should be called during D3 power down sequence. 515 * Disables link, should be called during D3 power down sequence.
458 * 516 *
459 */ 517 **/
460static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) 518static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
461{ 519{
462 u32 autoc2_reg; 520 u32 autoc2_reg, fwsm;
521 u16 ee_ctrl_2 = 0;
522
523 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
463 524
464 if (!hw->mng_fw_enabled && !hw->wol_enabled) { 525 /* Check to see if MNG FW could be enabled */
526 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
527
528 if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
529 !hw->wol_enabled &&
530 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
465 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 531 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
466 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; 532 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
467 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); 533 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
542{ 608{
543 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 609 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
544 610
611 /* Blocked by MNG FW so bail */
612 if (ixgbe_check_reset_blocked(hw))
613 return;
614
545 /* Disable tx laser; allow 100us to go dark per spec */ 615 /* Disable tx laser; allow 100us to go dark per spec */
546 esdp_reg |= IXGBE_ESDP_SDP3; 616 esdp_reg |= IXGBE_ESDP_SDP3;
547 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 617 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
582 **/ 652 **/
583static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 653static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
584{ 654{
655 /* Blocked by MNG FW so bail */
656 if (ixgbe_check_reset_blocked(hw))
657 return;
658
585 if (hw->mac.autotry_restart) { 659 if (hw->mac.autotry_restart) {
586 ixgbe_disable_tx_laser_multispeed_fiber(hw); 660 ixgbe_disable_tx_laser_multispeed_fiber(hw);
587 ixgbe_enable_tx_laser_multispeed_fiber(hw); 661 ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -590,75 +664,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
590} 664}
591 665
592/** 666/**
593 * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
594 * @hw: pointer to hardware structure
595 * @speed: link speed to set
596 *
597 * We set the module speed differently for fixed fiber. For other
598 * multi-speed devices we don't have an error value so here if we
599 * detect an error we just log it and exit.
600 */
601static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
602 ixgbe_link_speed speed)
603{
604 s32 status;
605 u8 rs, eeprom_data;
606
607 switch (speed) {
608 case IXGBE_LINK_SPEED_10GB_FULL:
609 /* one bit mask same as setting on */
610 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
611 break;
612 case IXGBE_LINK_SPEED_1GB_FULL:
613 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
614 break;
615 default:
616 hw_dbg(hw, "Invalid fixed module speed\n");
617 return;
618 }
619
620 /* Set RS0 */
621 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
622 IXGBE_I2C_EEPROM_DEV_ADDR2,
623 &eeprom_data);
624 if (status) {
625 hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
626 goto out;
627 }
628
629 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
630
631 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
632 IXGBE_I2C_EEPROM_DEV_ADDR2,
633 eeprom_data);
634 if (status) {
635 hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
636 goto out;
637 }
638
639 /* Set RS1 */
640 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
641 IXGBE_I2C_EEPROM_DEV_ADDR2,
642 &eeprom_data);
643 if (status) {
644 hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
645 goto out;
646 }
647
648 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
649
650 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
651 IXGBE_I2C_EEPROM_DEV_ADDR2,
652 eeprom_data);
653 if (status) {
654 hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
655 goto out;
656 }
657out:
658 return;
659}
660
661/**
662 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 667 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
663 * @hw: pointer to hardware structure 668 * @hw: pointer to hardware structure
664 * @speed: new link speed 669 * @speed: new link speed
@@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
768 773
769 /* Set the module link speed */ 774 /* Set the module link speed */
770 switch (hw->phy.media_type) { 775 switch (hw->phy.media_type) {
771 case ixgbe_media_type_fiber_fixed:
772 ixgbe_set_fiber_fixed_speed(hw,
773 IXGBE_LINK_SPEED_1GB_FULL);
774 break;
775 case ixgbe_media_type_fiber: 776 case ixgbe_media_type_fiber:
776 esdp_reg &= ~IXGBE_ESDP_SDP5; 777 esdp_reg &= ~IXGBE_ESDP_SDP5;
777 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 778 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
941 942
942out: 943out:
943 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 944 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
944 hw_dbg(hw, "Smartspeed has downgraded the link speed from " 945 hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
945 "the maximum advertised\n");
946 return status; 946 return status;
947} 947}
948 948
@@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
958 ixgbe_link_speed speed, 958 ixgbe_link_speed speed,
959 bool autoneg_wait_to_complete) 959 bool autoneg_wait_to_complete)
960{ 960{
961 bool autoneg = false;
961 s32 status = 0; 962 s32 status = 0;
962 u32 autoc, pma_pmd_1g, link_mode, start_autoc; 963 u32 pma_pmd_1g, link_mode, links_reg, i;
963 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 964 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
964 u32 orig_autoc = 0;
965 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 965 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
966 u32 links_reg;
967 u32 i;
968 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 966 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
969 bool got_lock = false; 967
970 bool autoneg = false; 968 /* holds the value of AUTOC register at this current point in time */
969 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
970 /* holds the cached value of AUTOC register */
971 u32 orig_autoc = 0;
972 /* temporary variable used for comparison purposes */
973 u32 autoc = current_autoc;
971 974
972 /* Check to see if speed passed in is supported. */ 975 /* Check to see if speed passed in is supported. */
973 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, 976 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
984 987
985 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 988 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
986 if (hw->mac.orig_link_settings_stored) 989 if (hw->mac.orig_link_settings_stored)
987 autoc = hw->mac.orig_autoc; 990 orig_autoc = hw->mac.orig_autoc;
988 else 991 else
989 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 992 orig_autoc = autoc;
990 993
991 orig_autoc = autoc;
992 start_autoc = hw->mac.cached_autoc;
993 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 994 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
994 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 995 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
995 996
@@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1029 } 1030 }
1030 } 1031 }
1031 1032
1032 if (autoc != start_autoc) { 1033 if (autoc != current_autoc) {
1033 /* Need SW/FW semaphore around AUTOC writes if LESM is on,
1034 * likewise reset_pipeline requires us to hold this lock as
1035 * it also writes to AUTOC.
1036 */
1037 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1038 status = hw->mac.ops.acquire_swfw_sync(hw,
1039 IXGBE_GSSR_MAC_CSR_SM);
1040 if (status != 0)
1041 goto out;
1042
1043 got_lock = true;
1044 }
1045
1046 /* Restart link */ 1034 /* Restart link */
1047 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 1035 status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
1048 hw->mac.cached_autoc = autoc; 1036 if (status)
1049 ixgbe_reset_pipeline_82599(hw); 1037 goto out;
1050
1051 if (got_lock)
1052 hw->mac.ops.release_swfw_sync(hw,
1053 IXGBE_GSSR_MAC_CSR_SM);
1054 1038
1055 /* Only poll for autoneg to complete if specified to do so */ 1039 /* Only poll for autoneg to complete if specified to do so */
1056 if (autoneg_wait_to_complete) { 1040 if (autoneg_wait_to_complete) {
@@ -1068,8 +1052,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1068 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 1052 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1069 status = 1053 status =
1070 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 1054 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1071 hw_dbg(hw, "Autoneg did not " 1055 hw_dbg(hw, "Autoneg did not complete.\n");
1072 "complete.\n");
1073 } 1056 }
1074 } 1057 }
1075 } 1058 }
@@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1117{ 1100{
1118 ixgbe_link_speed link_speed; 1101 ixgbe_link_speed link_speed;
1119 s32 status; 1102 s32 status;
1120 u32 ctrl, i, autoc2; 1103 u32 ctrl, i, autoc, autoc2;
1121 u32 curr_lms; 1104 u32 curr_lms;
1122 bool link_up = false; 1105 bool link_up = false;
1123 1106
@@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1151 hw->phy.ops.reset(hw); 1134 hw->phy.ops.reset(hw);
1152 1135
1153 /* remember AUTOC from before we reset */ 1136 /* remember AUTOC from before we reset */
1154 if (hw->mac.cached_autoc) 1137 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
1155 curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
1156 else
1157 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
1158 IXGBE_AUTOC_LMS_MASK;
1159 1138
1160mac_reset_top: 1139mac_reset_top:
1161 /* 1140 /*
@@ -1205,7 +1184,7 @@ mac_reset_top:
1205 * stored off yet. Otherwise restore the stored original 1184 * stored off yet. Otherwise restore the stored original
1206 * values since the reset operation sets back to defaults. 1185 * values since the reset operation sets back to defaults.
1207 */ 1186 */
1208 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1187 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1209 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1188 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1210 1189
1211 /* Enable link if disabled in NVM */ 1190 /* Enable link if disabled in NVM */
@@ -1216,7 +1195,7 @@ mac_reset_top:
1216 } 1195 }
1217 1196
1218 if (hw->mac.orig_link_settings_stored == false) { 1197 if (hw->mac.orig_link_settings_stored == false) {
1219 hw->mac.orig_autoc = hw->mac.cached_autoc; 1198 hw->mac.orig_autoc = autoc;
1220 hw->mac.orig_autoc2 = autoc2; 1199 hw->mac.orig_autoc2 = autoc2;
1221 hw->mac.orig_link_settings_stored = true; 1200 hw->mac.orig_link_settings_stored = true;
1222 } else { 1201 } else {
@@ -1227,34 +1206,18 @@ mac_reset_top:
1227 * Likewise if we support WoL we don't want change the 1206 * Likewise if we support WoL we don't want change the
1228 * LMS state either. 1207 * LMS state either.
1229 */ 1208 */
1230 if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) || 1209 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
1231 hw->wol_enabled) 1210 hw->wol_enabled)
1232 hw->mac.orig_autoc = 1211 hw->mac.orig_autoc =
1233 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | 1212 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1234 curr_lms; 1213 curr_lms;
1235 1214
1236 if (hw->mac.cached_autoc != hw->mac.orig_autoc) { 1215 if (autoc != hw->mac.orig_autoc) {
1237 /* Need SW/FW semaphore around AUTOC writes if LESM is 1216 status = hw->mac.ops.prot_autoc_write(hw,
1238 * on, likewise reset_pipeline requires us to hold 1217 hw->mac.orig_autoc,
1239 * this lock as it also writes to AUTOC. 1218 false);
1240 */ 1219 if (status)
1241 bool got_lock = false; 1220 goto reset_hw_out;
1242 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1243 status = hw->mac.ops.acquire_swfw_sync(hw,
1244 IXGBE_GSSR_MAC_CSR_SM);
1245 if (status)
1246 goto reset_hw_out;
1247
1248 got_lock = true;
1249 }
1250
1251 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1252 hw->mac.cached_autoc = hw->mac.orig_autoc;
1253 ixgbe_reset_pipeline_82599(hw);
1254
1255 if (got_lock)
1256 hw->mac.ops.release_swfw_sync(hw,
1257 IXGBE_GSSR_MAC_CSR_SM);
1258 } 1221 }
1259 1222
1260 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1223 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1634{ 1597{
1635 1598
1636 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1599 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1637 u32 bucket_hash = 0; 1600 u32 bucket_hash = 0, hi_dword = 0;
1601 int i;
1638 1602
1639 /* Apply masks to input data */ 1603 /* Apply masks to input data */
1640 input->dword_stream[0] &= input_mask->dword_stream[0]; 1604 for (i = 0; i <= 10; i++)
1641 input->dword_stream[1] &= input_mask->dword_stream[1]; 1605 input->dword_stream[i] &= input_mask->dword_stream[i];
1642 input->dword_stream[2] &= input_mask->dword_stream[2];
1643 input->dword_stream[3] &= input_mask->dword_stream[3];
1644 input->dword_stream[4] &= input_mask->dword_stream[4];
1645 input->dword_stream[5] &= input_mask->dword_stream[5];
1646 input->dword_stream[6] &= input_mask->dword_stream[6];
1647 input->dword_stream[7] &= input_mask->dword_stream[7];
1648 input->dword_stream[8] &= input_mask->dword_stream[8];
1649 input->dword_stream[9] &= input_mask->dword_stream[9];
1650 input->dword_stream[10] &= input_mask->dword_stream[10];
1651 1606
1652 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1607 /* record the flow_vm_vlan bits as they are a key part to the hash */
1653 flow_vm_vlan = ntohl(input->dword_stream[0]); 1608 flow_vm_vlan = ntohl(input->dword_stream[0]);
1654 1609
1655 /* generate common hash dword */ 1610 /* generate common hash dword */
1656 hi_hash_dword = ntohl(input->dword_stream[1] ^ 1611 for (i = 1; i <= 10; i++)
1657 input->dword_stream[2] ^ 1612 hi_dword ^= input->dword_stream[i];
1658 input->dword_stream[3] ^ 1613 hi_hash_dword = ntohl(hi_dword);
1659 input->dword_stream[4] ^
1660 input->dword_stream[5] ^
1661 input->dword_stream[6] ^
1662 input->dword_stream[7] ^
1663 input->dword_stream[8] ^
1664 input->dword_stream[9] ^
1665 input->dword_stream[10]);
1666 1614
1667 /* low dword is word swapped version of common */ 1615 /* low dword is word swapped version of common */
1668 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1616 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1681 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1629 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1682 1630
1683 /* Process remaining 30 bit of the key */ 1631 /* Process remaining 30 bit of the key */
1684 IXGBE_COMPUTE_BKT_HASH_ITERATION(1); 1632 for (i = 1; i <= 15; i++)
1685 IXGBE_COMPUTE_BKT_HASH_ITERATION(2); 1633 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1686 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1687 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1688 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1689 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1690 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1691 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1692 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1693 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1694 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1695 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1696 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1697 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1698 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1699 1634
1700 /* 1635 /*
1701 * Limit hash to 13 bits since max bucket count is 8K. 1636 * Limit hash to 13 bits since max bucket count is 8K.
@@ -2001,7 +1936,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2001 1936
2002 /* We need to run link autotry after the driver loads */ 1937 /* We need to run link autotry after the driver loads */
2003 hw->mac.autotry_restart = true; 1938 hw->mac.autotry_restart = true;
2004 hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
2005 1939
2006 if (ret_val == 0) 1940 if (ret_val == 0)
2007 ret_val = ixgbe_verify_fw_version_82599(hw); 1941 ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -2260,7 +2194,7 @@ fw_version_err:
2260 * Returns true if the LESM FW module is present and enabled. Otherwise 2194 * Returns true if the LESM FW module is present and enabled. Otherwise
2261 * returns false. Smart Speed must be disabled if LESM FW module is enabled. 2195 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2262 **/ 2196 **/
2263bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2197static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2264{ 2198{
2265 bool lesm_enabled = false; 2199 bool lesm_enabled = false;
2266 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2200 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2300,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2366 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing 2300 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
2367 * to AUTOC, so this function assumes the semaphore is held. 2301 * to AUTOC, so this function assumes the semaphore is held.
2368 **/ 2302 **/
2369s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) 2303static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2370{ 2304{
2371 s32 ret_val; 2305 s32 ret_val;
2372 u32 anlp1_reg = 0; 2306 u32 anlp1_reg = 0;
@@ -2380,11 +2314,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2380 IXGBE_WRITE_FLUSH(hw); 2314 IXGBE_WRITE_FLUSH(hw);
2381 } 2315 }
2382 2316
2383 autoc_reg = hw->mac.cached_autoc; 2317 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2384 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2318 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2385 2319
2386 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ 2320 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2387 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); 2321 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2322 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2388 2323
2389 /* Wait for AN to leave state 0 */ 2324 /* Wait for AN to leave state 0 */
2390 for (i = 0; i < 10; i++) { 2325 for (i = 0; i < 10; i++) {
@@ -2565,7 +2500,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2565 .release_swfw_sync = &ixgbe_release_swfw_sync, 2500 .release_swfw_sync = &ixgbe_release_swfw_sync,
2566 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, 2501 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2567 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, 2502 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2568 .mng_fw_enabled = &ixgbe_mng_enabled, 2503 .prot_autoc_read = &prot_autoc_read_82599,
2504 .prot_autoc_write = &prot_autoc_write_82599,
2569}; 2505};
2570 2506
2571static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2507static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5c434b617b1..24fba39e194e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
72 bool link_up; 73 bool link_up;
73 74
74 switch (hw->phy.media_type) { 75 switch (hw->phy.media_type) {
75 case ixgbe_media_type_fiber_fixed:
76 case ixgbe_media_type_fiber: 76 case ixgbe_media_type_fiber:
77 hw->mac.ops.check_link(hw, &speed, &link_up, false); 77 hw->mac.ops.check_link(hw, &speed, &link_up, false);
78 /* if link is down, assume supported */ 78 /* if link is down, assume supported */
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
114 s32 ret_val = 0; 114 s32 ret_val = 0;
115 u32 reg = 0, reg_bp = 0; 115 u32 reg = 0, reg_bp = 0;
116 u16 reg_cu = 0; 116 u16 reg_cu = 0;
117 bool got_lock = false; 117 bool locked = false;
118 118
119 /* 119 /*
120 * Validate the requested mode. Strict IEEE mode does not allow 120 * Validate the requested mode. Strict IEEE mode does not allow
@@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
139 * we link at 10G, the 1G advertisement is harmless and vice versa. 139 * we link at 10G, the 1G advertisement is harmless and vice versa.
140 */ 140 */
141 switch (hw->phy.media_type) { 141 switch (hw->phy.media_type) {
142 case ixgbe_media_type_fiber_fixed:
143 case ixgbe_media_type_fiber:
144 case ixgbe_media_type_backplane: 142 case ixgbe_media_type_backplane:
143 /* some MAC's need RMW protection on AUTOC */
144 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
145 if (ret_val)
146 goto out;
147
148 /* only backplane uses autoc so fall though */
149 case ixgbe_media_type_fiber:
145 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 150 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
146 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); 151
147 break; 152 break;
148 case ixgbe_media_type_copper: 153 case ixgbe_media_type_copper:
149 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 154 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
240 * LESM is on, likewise reset_pipeline requries the lock as 245 * LESM is on, likewise reset_pipeline requries the lock as
241 * it also writes AUTOC. 246 * it also writes AUTOC.
242 */ 247 */
243 if ((hw->mac.type == ixgbe_mac_82599EB) && 248 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
244 ixgbe_verify_lesm_fw_enabled_82599(hw)) { 249 if (ret_val)
245 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 250 goto out;
246 IXGBE_GSSR_MAC_CSR_SM);
247 if (ret_val)
248 goto out;
249
250 got_lock = true;
251 }
252
253 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
254
255 if (hw->mac.type == ixgbe_mac_82599EB)
256 ixgbe_reset_pipeline_82599(hw);
257
258 if (got_lock)
259 hw->mac.ops.release_swfw_sync(hw,
260 IXGBE_GSSR_MAC_CSR_SM);
261 251
262 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 252 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
263 ixgbe_device_supports_autoneg_fc(hw)) { 253 ixgbe_device_supports_autoneg_fc(hw)) {
264 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 254 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
265 MDIO_MMD_AN, reg_cu); 255 MDIO_MMD_AN, reg_cu);
266 } 256 }
@@ -656,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
656 **/ 646 **/
657s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 647s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
658{ 648{
659 struct ixgbe_adapter *adapter = hw->back;
660 struct ixgbe_mac_info *mac = &hw->mac;
661 u16 link_status; 649 u16 link_status;
662 650
663 hw->bus.type = ixgbe_bus_type_pci_express; 651 hw->bus.type = ixgbe_bus_type_pci_express;
664 652
665 /* Get the negotiated link width and speed from PCI config space */ 653 /* Get the negotiated link width and speed from PCI config space */
666 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, 654 link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
667 &link_status);
668 655
669 hw->bus.width = ixgbe_convert_bus_width(link_status); 656 hw->bus.width = ixgbe_convert_bus_width(link_status);
670 hw->bus.speed = ixgbe_convert_bus_speed(link_status); 657 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
671 658
672 mac->ops.set_lan_id(hw); 659 hw->mac.ops.set_lan_id(hw);
673 660
674 return 0; 661 return 0;
675} 662}
@@ -2406,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2406 2393
2407 switch (hw->phy.media_type) { 2394 switch (hw->phy.media_type) {
2408 /* Autoneg flow control on fiber adapters */ 2395 /* Autoneg flow control on fiber adapters */
2409 case ixgbe_media_type_fiber_fixed:
2410 case ixgbe_media_type_fiber: 2396 case ixgbe_media_type_fiber:
2411 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2397 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2412 ret_val = ixgbe_fc_autoneg_fiber(hw); 2398 ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2437,6 +2423,53 @@ out:
2437} 2423}
2438 2424
2439/** 2425/**
2426 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2427 * @hw: pointer to hardware structure
2428 *
2429 * System-wide timeout range is encoded in PCIe Device Control2 register.
2430 *
2431 * Add 10% to specified maximum and return the number of times to poll for
2432 * completion timeout, in units of 100 microsec. Never return less than
2433 * 800 = 80 millisec.
2434 **/
2435static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2436{
2437 s16 devctl2;
2438 u32 pollcnt;
2439
2440 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2441 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2442
2443 switch (devctl2) {
2444 case IXGBE_PCIDEVCTRL2_65_130ms:
2445 pollcnt = 1300; /* 130 millisec */
2446 break;
2447 case IXGBE_PCIDEVCTRL2_260_520ms:
2448 pollcnt = 5200; /* 520 millisec */
2449 break;
2450 case IXGBE_PCIDEVCTRL2_1_2s:
2451 pollcnt = 20000; /* 2 sec */
2452 break;
2453 case IXGBE_PCIDEVCTRL2_4_8s:
2454 pollcnt = 80000; /* 8 sec */
2455 break;
2456 case IXGBE_PCIDEVCTRL2_17_34s:
2457 pollcnt = 34000; /* 34 sec */
2458 break;
2459 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
2460 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
2461 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
2462 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
2463 default:
2464 pollcnt = 800; /* 80 millisec minimum */
2465 break;
2466 }
2467
2468 /* add 10% to spec maximum */
2469 return (pollcnt * 11) / 10;
2470}
2471
2472/**
2440 * ixgbe_disable_pcie_master - Disable PCI-express master access 2473 * ixgbe_disable_pcie_master - Disable PCI-express master access
2441 * @hw: pointer to hardware structure 2474 * @hw: pointer to hardware structure
2442 * 2475 *
@@ -2447,16 +2480,16 @@ out:
2447 **/ 2480 **/
2448static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2481static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2449{ 2482{
2450 struct ixgbe_adapter *adapter = hw->back;
2451 s32 status = 0; 2483 s32 status = 0;
2452 u32 i; 2484 u32 i, poll;
2453 u16 value; 2485 u16 value;
2454 2486
2455 /* Always set this bit to ensure any future transactions are blocked */ 2487 /* Always set this bit to ensure any future transactions are blocked */
2456 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); 2488 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2457 2489
2458 /* Exit if master requests are blocked */ 2490 /* Exit if master requests are blocked */
2459 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2491 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
2492 ixgbe_removed(hw->hw_addr))
2460 goto out; 2493 goto out;
2461 2494
2462 /* Poll for master request bit to clear */ 2495 /* Poll for master request bit to clear */
@@ -2481,10 +2514,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2481 * Before proceeding, make sure that the PCIe block does not have 2514 * Before proceeding, make sure that the PCIe block does not have
2482 * transactions pending. 2515 * transactions pending.
2483 */ 2516 */
2484 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2517 poll = ixgbe_pcie_timeout_poll(hw);
2518 for (i = 0; i < poll; i++) {
2485 udelay(100); 2519 udelay(100);
2486 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, 2520 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2487 &value); 2521 if (ixgbe_removed(hw->hw_addr))
2522 goto out;
2488 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2523 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2489 goto out; 2524 goto out;
2490 } 2525 }
@@ -2564,6 +2599,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2564} 2599}
2565 2600
2566/** 2601/**
2602 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2603 * @hw: pointer to hardware structure
2604 * @reg_val: Value we read from AUTOC
2605 * @locked: bool to indicate whether the SW/FW lock should be taken. Never
2606 * true in this the generic case.
2607 *
2608 * The default case requires no protection so just to the register read.
2609 **/
2610s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2611{
2612 *locked = false;
2613 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2614 return 0;
2615}
2616
2617/**
2618 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2619 * @hw: pointer to hardware structure
2620 * @reg_val: value to write to AUTOC
2621 * @locked: bool to indicate whether the SW/FW lock was already taken by
2622 * previous read.
2623 **/
2624s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
2625{
2626 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2627 return 0;
2628}
2629
2630/**
2567 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2631 * ixgbe_disable_rx_buff_generic - Stops the receive data path
2568 * @hw: pointer to hardware structure 2632 * @hw: pointer to hardware structure
2569 * 2633 *
@@ -2641,6 +2705,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2641 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2705 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2642 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2706 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2643 s32 ret_val = 0; 2707 s32 ret_val = 0;
2708 bool locked = false;
2644 2709
2645 /* 2710 /*
2646 * Link must be up to auto-blink the LEDs; 2711 * Link must be up to auto-blink the LEDs;
@@ -2649,28 +2714,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2649 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2714 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2650 2715
2651 if (!link_up) { 2716 if (!link_up) {
2652 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 2717 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2653 * LESM is on. 2718 if (ret_val)
2654 */ 2719 goto out;
2655 bool got_lock = false;
2656
2657 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2658 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2659 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2660 IXGBE_GSSR_MAC_CSR_SM);
2661 if (ret_val)
2662 goto out;
2663 2720
2664 got_lock = true;
2665 }
2666 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2721 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2667 autoc_reg |= IXGBE_AUTOC_FLU; 2722 autoc_reg |= IXGBE_AUTOC_FLU;
2668 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2723
2724 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2725 if (ret_val)
2726 goto out;
2727
2669 IXGBE_WRITE_FLUSH(hw); 2728 IXGBE_WRITE_FLUSH(hw);
2670 2729
2671 if (got_lock)
2672 hw->mac.ops.release_swfw_sync(hw,
2673 IXGBE_GSSR_MAC_CSR_SM);
2674 usleep_range(10000, 20000); 2730 usleep_range(10000, 20000);
2675 } 2731 }
2676 2732
@@ -2690,33 +2746,21 @@ out:
2690 **/ 2746 **/
2691s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2747s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2692{ 2748{
2693 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2749 u32 autoc_reg = 0;
2694 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2750 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2695 s32 ret_val = 0; 2751 s32 ret_val = 0;
2696 bool got_lock = false; 2752 bool locked = false;
2697 2753
2698 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 2754 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2699 * LESM is on. 2755 if (ret_val)
2700 */ 2756 goto out;
2701 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2702 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2703 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2704 IXGBE_GSSR_MAC_CSR_SM);
2705 if (ret_val)
2706 goto out;
2707
2708 got_lock = true;
2709 }
2710 2757
2711 autoc_reg &= ~IXGBE_AUTOC_FLU; 2758 autoc_reg &= ~IXGBE_AUTOC_FLU;
2712 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2759 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2713 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2714
2715 if (hw->mac.type == ixgbe_mac_82599EB)
2716 ixgbe_reset_pipeline_82599(hw);
2717 2760
2718 if (got_lock) 2761 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2719 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 2762 if (ret_val)
2763 goto out;
2720 2764
2721 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2765 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2722 led_reg &= ~IXGBE_LED_BLINK(index); 2766 led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2817,7 +2861,6 @@ san_mac_addr_clr:
2817 **/ 2861 **/
2818u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2862u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2819{ 2863{
2820 struct ixgbe_adapter *adapter = hw->back;
2821 u16 msix_count = 1; 2864 u16 msix_count = 1;
2822 u16 max_msix_count; 2865 u16 max_msix_count;
2823 u16 pcie_offset; 2866 u16 pcie_offset;
@@ -2836,7 +2879,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2836 return msix_count; 2879 return msix_count;
2837 } 2880 }
2838 2881
2839 pci_read_config_word(adapter->pdev, pcie_offset, &msix_count); 2882 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
2883 if (ixgbe_removed(hw->hw_addr))
2884 msix_count = 0;
2840 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2885 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2841 2886
2842 /* MSI-X count is zero-based in HW */ 2887 /* MSI-X count is zero-based in HW */
@@ -2868,6 +2913,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2868 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2913 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2869 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2914 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2870 2915
2916 if (ixgbe_removed(hw->hw_addr))
2917 goto done;
2918
2871 if (!mpsar_lo && !mpsar_hi) 2919 if (!mpsar_lo && !mpsar_hi)
2872 goto done; 2920 goto done;
2873 2921
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f2e3919750ec..f12c40fb5537 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -98,6 +99,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
98 bool *link_up, bool link_up_wait_to_complete); 99 bool *link_up, bool link_up_wait_to_complete);
99s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 100s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
100 u16 *wwpn_prefix); 101 u16 *wwpn_prefix);
102
103s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
104s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
105
101s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 106s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
102s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 107s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
103void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); 108void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
106s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 111s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
107 u8 build, u8 ver); 112 u8 build, u8 ver);
108void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); 113void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
114bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
109 115
110void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, 116void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
111 u32 headroom, int strategy); 117 u32 headroom, int strategy);
112s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
113 118
114#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 119#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
115#define IXGBE_EMC_INTERNAL_DATA 0x00 120#define IXGBE_EMC_INTERNAL_DATA 0x00
@@ -125,6 +130,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
125s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); 130s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
126 131
127#define IXGBE_FAILED_READ_REG 0xffffffffU 132#define IXGBE_FAILED_READ_REG 0xffffffffU
133#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
134#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
135
136u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
137void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
128 138
129static inline bool ixgbe_removed(void __iomem *addr) 139static inline bool ixgbe_removed(void __iomem *addr)
130{ 140{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 05e23b80b5e3..bdb99b3b0f30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d71d9ce3e394..d5a1e3db0774 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index c5933f6dceee..472b0f450bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 043307024c4a..6c55c14d082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -1127,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1127 } 1128 }
1128 1129
1129 do { 1130 do {
1130 start = u64_stats_fetch_begin_bh(&ring->syncp); 1131 start = u64_stats_fetch_begin_irq(&ring->syncp);
1131 data[i] = ring->stats.packets; 1132 data[i] = ring->stats.packets;
1132 data[i+1] = ring->stats.bytes; 1133 data[i+1] = ring->stats.bytes;
1133 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1134 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1134 i += 2; 1135 i += 2;
1135#ifdef BP_EXTENDED_STATS 1136#ifdef BP_EXTENDED_STATS
1136 data[i] = ring->stats.yields; 1137 data[i] = ring->stats.yields;
@@ -1155,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1155 } 1156 }
1156 1157
1157 do { 1158 do {
1158 start = u64_stats_fetch_begin_bh(&ring->syncp); 1159 start = u64_stats_fetch_begin_irq(&ring->syncp);
1159 data[i] = ring->stats.packets; 1160 data[i] = ring->stats.packets;
1160 data[i+1] = ring->stats.bytes; 1161 data[i+1] = ring->stats.bytes;
1161 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1162 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1162 i += 2; 1163 i += 2;
1163#ifdef BP_EXTENDED_STATS 1164#ifdef BP_EXTENDED_STATS
1164 data[i] = ring->stats.yields; 1165 data[i] = ring->stats.yields;
@@ -1247,6 +1248,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1247 struct ixgbe_hw *hw = &adapter->hw; 1248 struct ixgbe_hw *hw = &adapter->hw;
1248 bool link_up; 1249 bool link_up;
1249 u32 link_speed = 0; 1250 u32 link_speed = 0;
1251
1252 if (ixgbe_removed(hw->hw_addr)) {
1253 *data = 1;
1254 return 1;
1255 }
1250 *data = 0; 1256 *data = 0;
1251 1257
1252 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1258 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1969,6 +1975,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
1969 data[1] = 1; 1975 data[1] = 1;
1970 data[2] = 1; 1976 data[2] = 1;
1971 data[3] = 1; 1977 data[3] = 1;
1978 data[4] = 1;
1972 eth_test->flags |= ETH_TEST_FL_FAILED; 1979 eth_test->flags |= ETH_TEST_FL_FAILED;
1973 return; 1980 return;
1974 } 1981 }
@@ -1988,6 +1995,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
1988 data[1] = 1; 1995 data[1] = 1;
1989 data[2] = 1; 1996 data[2] = 1;
1990 data[3] = 1; 1997 data[3] = 1;
1998 data[4] = 1;
1991 eth_test->flags |= ETH_TEST_FL_FAILED; 1999 eth_test->flags |= ETH_TEST_FL_FAILED;
1992 clear_bit(__IXGBE_TESTING, 2000 clear_bit(__IXGBE_TESTING,
1993 &adapter->state); 2001 &adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 08726177a3eb..25a3dfef33e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
407 408
408 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { 409 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
409 /* return 0 to bypass going to ULD for DDPed data */ 410 /* return 0 to bypass going to ULD for DDPed data */
410 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): 411 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
411 /* update length of DDPed data */ 412 /* update length of DDPed data */
412 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 413 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
413 rc = 0; 414 rc = 0;
414 break; 415 break;
415 /* unmap the sg list when FCPRSP is received */ 416 /* unmap the sg list when FCPRSP is received */
416 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): 417 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
417 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, 418 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
418 ddp->sgc, DMA_FROM_DEVICE); 419 ddp->sgc, DMA_FROM_DEVICE);
419 ddp->err = ddp_err; 420 ddp->err = ddp_err;
@@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
421 ddp->sgc = 0; 422 ddp->sgc = 0;
422 /* fall through */ 423 /* fall through */
423 /* if DDP length is present pass it through to ULD */ 424 /* if DDP length is present pass it through to ULD */
424 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): 425 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
425 /* update length of DDPed data */ 426 /* update length of DDPed data */
426 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 427 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
427 if (ddp->len) 428 if (ddp->len)
428 rc = ddp->len; 429 rc = ddp->len;
429 break; 430 break;
430 /* no match will return as an error */ 431 /* no match will return as an error */
431 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): 432 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
432 default: 433 default:
433 break; 434 break;
434 } 435 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 3a02759b5e95..b16cc786750d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 32e3eaaa160a..2067d392cc3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -698,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
698static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 699static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
699 int vectors) 700 int vectors)
700{ 701{
701 int err, vector_threshold; 702 int vector_threshold;
702 703
703 /* We'll want at least 2 (vector_threshold): 704 /* We'll want at least 2 (vector_threshold):
704 * 1) TxQ[0] + RxQ[0] handler 705 * 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
712 * Right now, we simply care about how many we'll get; we'll 713 * Right now, we simply care about how many we'll get; we'll
713 * set them up later while requesting irq's. 714 * set them up later while requesting irq's.
714 */ 715 */
715 while (vectors >= vector_threshold) { 716 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
716 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 717 vector_threshold, vectors);
717 vectors);
718 if (!err) /* Success in acquiring all requested vectors. */
719 break;
720 else if (err < 0)
721 vectors = 0; /* Nasty failure, quit now */
722 else /* err == number of vectors we should try again with */
723 vectors = err;
724 }
725 718
726 if (vectors < vector_threshold) { 719 if (vectors < 0) {
727 /* Can't allocate enough MSI-X interrupts? Oh well. 720 /* Can't allocate enough MSI-X interrupts? Oh well.
728 * This just means we'll go with either a single MSI 721 * This just means we'll go with either a single MSI
729 * vector or fall back to legacy interrupts. 722 * vector or fall back to legacy interrupts.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18076c4178b4..8436c651b735 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -67,7 +68,7 @@ static char ixgbe_default_device_descr[] =
67#define DRV_VERSION "3.19.1-k" 68#define DRV_VERSION "3.19.1-k"
68const char ixgbe_driver_version[] = DRV_VERSION; 69const char ixgbe_driver_version[] = DRV_VERSION;
69static const char ixgbe_copyright[] = 70static const char ixgbe_copyright[] =
70 "Copyright (c) 1999-2013 Intel Corporation."; 71 "Copyright (c) 1999-2014 Intel Corporation.";
71 72
72static const struct ixgbe_info *ixgbe_info_tbl[] = { 73static const struct ixgbe_info *ixgbe_info_tbl[] = {
73 [board_82598] = &ixgbe_82598_info, 74 [board_82598] = &ixgbe_82598_info,
@@ -151,6 +152,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
151MODULE_LICENSE("GPL"); 152MODULE_LICENSE("GPL");
152MODULE_VERSION(DRV_VERSION); 153MODULE_VERSION(DRV_VERSION);
153 154
155static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
156
154static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, 157static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
155 u32 reg, u16 *value) 158 u32 reg, u16 *value)
156{ 159{
@@ -169,6 +172,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
169 return -1; 172 return -1;
170 173
171 pcie_capability_read_word(parent_dev, reg, value); 174 pcie_capability_read_word(parent_dev, reg, value);
175 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
176 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
177 return -1;
172 return 0; 178 return 0;
173} 179}
174 180
@@ -313,6 +319,57 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
313 ixgbe_remove_adapter(hw); 319 ixgbe_remove_adapter(hw);
314} 320}
315 321
322static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
323{
324 u16 value;
325
326 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
327 if (value == IXGBE_FAILED_READ_CFG_WORD) {
328 ixgbe_remove_adapter(hw);
329 return true;
330 }
331 return false;
332}
333
334u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
335{
336 struct ixgbe_adapter *adapter = hw->back;
337 u16 value;
338
339 if (ixgbe_removed(hw->hw_addr))
340 return IXGBE_FAILED_READ_CFG_WORD;
341 pci_read_config_word(adapter->pdev, reg, &value);
342 if (value == IXGBE_FAILED_READ_CFG_WORD &&
343 ixgbe_check_cfg_remove(hw, adapter->pdev))
344 return IXGBE_FAILED_READ_CFG_WORD;
345 return value;
346}
347
348#ifdef CONFIG_PCI_IOV
349static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
350{
351 struct ixgbe_adapter *adapter = hw->back;
352 u32 value;
353
354 if (ixgbe_removed(hw->hw_addr))
355 return IXGBE_FAILED_READ_CFG_DWORD;
356 pci_read_config_dword(adapter->pdev, reg, &value);
357 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
358 ixgbe_check_cfg_remove(hw, adapter->pdev))
359 return IXGBE_FAILED_READ_CFG_DWORD;
360 return value;
361}
362#endif /* CONFIG_PCI_IOV */
363
364void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
365{
366 struct ixgbe_adapter *adapter = hw->back;
367
368 if (ixgbe_removed(hw->hw_addr))
369 return;
370 pci_write_config_word(adapter->pdev, reg, value);
371}
372
316static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) 373static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
317{ 374{
318 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); 375 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1264,7 +1321,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1264 struct sk_buff *skb) 1321 struct sk_buff *skb)
1265{ 1322{
1266 if (ring->netdev->features & NETIF_F_RXHASH) 1323 if (ring->netdev->features & NETIF_F_RXHASH)
1267 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 1324 skb_set_hash(skb,
1325 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1326 PKT_HASH_TYPE_L3);
1268} 1327}
1269 1328
1270#ifdef IXGBE_FCOE 1329#ifdef IXGBE_FCOE
@@ -1480,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1480 hdr.network += ETH_HLEN; 1539 hdr.network += ETH_HLEN;
1481 1540
1482 /* handle any vlan tag if present */ 1541 /* handle any vlan tag if present */
1483 if (protocol == __constant_htons(ETH_P_8021Q)) { 1542 if (protocol == htons(ETH_P_8021Q)) {
1484 if ((hdr.network - data) > (max_len - VLAN_HLEN)) 1543 if ((hdr.network - data) > (max_len - VLAN_HLEN))
1485 return max_len; 1544 return max_len;
1486 1545
@@ -1489,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1489 } 1548 }
1490 1549
1491 /* handle L3 protocols */ 1550 /* handle L3 protocols */
1492 if (protocol == __constant_htons(ETH_P_IP)) { 1551 if (protocol == htons(ETH_P_IP)) {
1493 if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) 1552 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
1494 return max_len; 1553 return max_len;
1495 1554
@@ -1503,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1503 /* record next protocol if header is present */ 1562 /* record next protocol if header is present */
1504 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) 1563 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
1505 nexthdr = hdr.ipv4->protocol; 1564 nexthdr = hdr.ipv4->protocol;
1506 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 1565 } else if (protocol == htons(ETH_P_IPV6)) {
1507 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) 1566 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
1508 return max_len; 1567 return max_len;
1509 1568
@@ -1511,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1511 nexthdr = hdr.ipv6->nexthdr; 1570 nexthdr = hdr.ipv6->nexthdr;
1512 hlen = sizeof(struct ipv6hdr); 1571 hlen = sizeof(struct ipv6hdr);
1513#ifdef IXGBE_FCOE 1572#ifdef IXGBE_FCOE
1514 } else if (protocol == __constant_htons(ETH_P_FCOE)) { 1573 } else if (protocol == htons(ETH_P_FCOE)) {
1515 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) 1574 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
1516 return max_len; 1575 return max_len;
1517 hlen = FCOE_HEADER_LEN; 1576 hlen = FCOE_HEADER_LEN;
@@ -2026,7 +2085,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2026#endif /* IXGBE_FCOE */ 2085#endif /* IXGBE_FCOE */
2027 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 2086 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2028 2087
2029 do { 2088 while (likely(total_rx_packets < budget)) {
2030 union ixgbe_adv_rx_desc *rx_desc; 2089 union ixgbe_adv_rx_desc *rx_desc;
2031 struct sk_buff *skb; 2090 struct sk_buff *skb;
2032 2091
@@ -2101,7 +2160,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2101 2160
2102 /* update budget accounting */ 2161 /* update budget accounting */
2103 total_rx_packets++; 2162 total_rx_packets++;
2104 } while (likely(total_rx_packets < budget)); 2163 }
2105 2164
2106 u64_stats_update_begin(&rx_ring->syncp); 2165 u64_stats_update_begin(&rx_ring->syncp);
2107 rx_ring->stats.packets += total_rx_packets; 2166 rx_ring->stats.packets += total_rx_packets;
@@ -2630,9 +2689,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
2630 switch (hw->mac.type) { 2689 switch (hw->mac.type) {
2631 case ixgbe_mac_82599EB: 2690 case ixgbe_mac_82599EB:
2632 case ixgbe_mac_X540: 2691 case ixgbe_mac_X540:
2633 if (eicr & IXGBE_EICR_ECC) 2692 if (eicr & IXGBE_EICR_ECC) {
2634 e_info(link, "Received unrecoverable ECC Err, please " 2693 e_info(link, "Received ECC Err, initiating reset\n");
2635 "reboot\n"); 2694 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2695 ixgbe_service_event_schedule(adapter);
2696 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2697 }
2636 /* Handle Flow Director Full threshold interrupt */ 2698 /* Handle Flow Director Full threshold interrupt */
2637 if (eicr & IXGBE_EICR_FLOW_DIR) { 2699 if (eicr & IXGBE_EICR_FLOW_DIR) {
2638 int reinit_count = 0; 2700 int reinit_count = 0;
@@ -2846,9 +2908,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2846 ixgbe_check_sfp_event(adapter, eicr); 2908 ixgbe_check_sfp_event(adapter, eicr);
2847 /* Fall through */ 2909 /* Fall through */
2848 case ixgbe_mac_X540: 2910 case ixgbe_mac_X540:
2849 if (eicr & IXGBE_EICR_ECC) 2911 if (eicr & IXGBE_EICR_ECC) {
2850 e_info(link, "Received unrecoverable ECC err, please " 2912 e_info(link, "Received ECC Err, initiating reset\n");
2851 "reboot\n"); 2913 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2914 ixgbe_service_event_schedule(adapter);
2915 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2916 }
2852 ixgbe_check_overtemp_event(adapter, eicr); 2917 ixgbe_check_overtemp_event(adapter, eicr);
2853 break; 2918 break;
2854 default: 2919 default:
@@ -4590,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4590static void ixgbe_up_complete(struct ixgbe_adapter *adapter) 4655static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4591{ 4656{
4592 struct ixgbe_hw *hw = &adapter->hw; 4657 struct ixgbe_hw *hw = &adapter->hw;
4593 struct net_device *upper;
4594 struct list_head *iter;
4595 int err; 4658 int err;
4596 u32 ctrl_ext; 4659 u32 ctrl_ext;
4597 4660
@@ -4633,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4633 e_crit(drv, "Fan has stopped, replace the adapter\n"); 4696 e_crit(drv, "Fan has stopped, replace the adapter\n");
4634 } 4697 }
4635 4698
4636 /* enable transmits */
4637 netif_tx_start_all_queues(adapter->netdev);
4638
4639 /* enable any upper devices */
4640 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4641 if (netif_is_macvlan(upper)) {
4642 struct macvlan_dev *vlan = netdev_priv(upper);
4643
4644 if (vlan->fwd_priv)
4645 netif_tx_start_all_queues(upper);
4646 }
4647 }
4648
4649 /* bring the link up in the watchdog, this could race with our first 4699 /* bring the link up in the watchdog, this could race with our first
4650 * link up interrupt but shouldn't be a problem */ 4700 * link up interrupt but shouldn't be a problem */
4651 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4701 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5502,6 +5552,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5502 struct net_device *netdev = adapter->netdev; 5552 struct net_device *netdev = adapter->netdev;
5503 u32 err; 5553 u32 err;
5504 5554
5555 adapter->hw.hw_addr = adapter->io_addr;
5505 pci_set_power_state(pdev, PCI_D0); 5556 pci_set_power_state(pdev, PCI_D0);
5506 pci_restore_state(pdev); 5557 pci_restore_state(pdev);
5507 /* 5558 /*
@@ -5515,6 +5566,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
5515 e_dev_err("Cannot enable PCI device from suspend\n"); 5566 e_dev_err("Cannot enable PCI device from suspend\n");
5516 return err; 5567 return err;
5517 } 5568 }
5569 smp_mb__before_clear_bit();
5570 clear_bit(__IXGBE_DISABLED, &adapter->state);
5518 pci_set_master(pdev); 5571 pci_set_master(pdev);
5519 5572
5520 pci_wake_from_d3(pdev, false); 5573 pci_wake_from_d3(pdev, false);
@@ -5612,7 +5665,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5612 5665
5613 ixgbe_release_hw_control(adapter); 5666 ixgbe_release_hw_control(adapter);
5614 5667
5615 pci_disable_device(pdev); 5668 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
5669 pci_disable_device(pdev);
5616 5670
5617 return 0; 5671 return 0;
5618} 5672}
@@ -6016,6 +6070,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6016{ 6070{
6017 struct net_device *netdev = adapter->netdev; 6071 struct net_device *netdev = adapter->netdev;
6018 struct ixgbe_hw *hw = &adapter->hw; 6072 struct ixgbe_hw *hw = &adapter->hw;
6073 struct net_device *upper;
6074 struct list_head *iter;
6019 u32 link_speed = adapter->link_speed; 6075 u32 link_speed = adapter->link_speed;
6020 bool flow_rx, flow_tx; 6076 bool flow_rx, flow_tx;
6021 6077
@@ -6067,6 +6123,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6067 netif_carrier_on(netdev); 6123 netif_carrier_on(netdev);
6068 ixgbe_check_vf_rate_limit(adapter); 6124 ixgbe_check_vf_rate_limit(adapter);
6069 6125
6126 /* enable transmits */
6127 netif_tx_wake_all_queues(adapter->netdev);
6128
6129 /* enable any upper devices */
6130 rtnl_lock();
6131 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
6132 if (netif_is_macvlan(upper)) {
6133 struct macvlan_dev *vlan = netdev_priv(upper);
6134
6135 if (vlan->fwd_priv)
6136 netif_tx_wake_all_queues(upper);
6137 }
6138 }
6139 rtnl_unlock();
6140
6070 /* update the default user priority for VFs */ 6141 /* update the default user priority for VFs */
6071 ixgbe_update_default_up(adapter); 6142 ixgbe_update_default_up(adapter);
6072 6143
@@ -6454,7 +6525,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6454 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6525 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6455 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 6526 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6456 6527
6457 if (first->protocol == __constant_htons(ETH_P_IP)) { 6528 if (first->protocol == htons(ETH_P_IP)) {
6458 struct iphdr *iph = ip_hdr(skb); 6529 struct iphdr *iph = ip_hdr(skb);
6459 iph->tot_len = 0; 6530 iph->tot_len = 0;
6460 iph->check = 0; 6531 iph->check = 0;
@@ -6514,12 +6585,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
6514 } else { 6585 } else {
6515 u8 l4_hdr = 0; 6586 u8 l4_hdr = 0;
6516 switch (first->protocol) { 6587 switch (first->protocol) {
6517 case __constant_htons(ETH_P_IP): 6588 case htons(ETH_P_IP):
6518 vlan_macip_lens |= skb_network_header_len(skb); 6589 vlan_macip_lens |= skb_network_header_len(skb);
6519 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 6590 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6520 l4_hdr = ip_hdr(skb)->protocol; 6591 l4_hdr = ip_hdr(skb)->protocol;
6521 break; 6592 break;
6522 case __constant_htons(ETH_P_IPV6): 6593 case htons(ETH_P_IPV6):
6523 vlan_macip_lens |= skb_network_header_len(skb); 6594 vlan_macip_lens |= skb_network_header_len(skb);
6524 l4_hdr = ipv6_hdr(skb)->nexthdr; 6595 l4_hdr = ipv6_hdr(skb)->nexthdr;
6525 break; 6596 break;
@@ -6794,9 +6865,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
6794 hdr.network = skb_network_header(first->skb); 6865 hdr.network = skb_network_header(first->skb);
6795 6866
6796 /* Currently only IPv4/IPv6 with TCP is supported */ 6867 /* Currently only IPv4/IPv6 with TCP is supported */
6797 if ((first->protocol != __constant_htons(ETH_P_IPV6) || 6868 if ((first->protocol != htons(ETH_P_IPV6) ||
6798 hdr.ipv6->nexthdr != IPPROTO_TCP) && 6869 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6799 (first->protocol != __constant_htons(ETH_P_IP) || 6870 (first->protocol != htons(ETH_P_IP) ||
6800 hdr.ipv4->protocol != IPPROTO_TCP)) 6871 hdr.ipv4->protocol != IPPROTO_TCP))
6801 return; 6872 return;
6802 6873
@@ -6829,12 +6900,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
6829 * and write the value to source port portion of compressed dword 6900 * and write the value to source port portion of compressed dword
6830 */ 6901 */
6831 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) 6902 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
6832 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); 6903 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
6833 else 6904 else
6834 common.port.src ^= th->dest ^ first->protocol; 6905 common.port.src ^= th->dest ^ first->protocol;
6835 common.port.dst ^= th->source; 6906 common.port.dst ^= th->source;
6836 6907
6837 if (first->protocol == __constant_htons(ETH_P_IP)) { 6908 if (first->protocol == htons(ETH_P_IP)) {
6838 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 6909 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6839 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; 6910 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6840 } else { 6911 } else {
@@ -6900,8 +6971,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6900 * or FIP and we have FCoE enabled on the adapter 6971 * or FIP and we have FCoE enabled on the adapter
6901 */ 6972 */
6902 switch (vlan_get_protocol(skb)) { 6973 switch (vlan_get_protocol(skb)) {
6903 case __constant_htons(ETH_P_FCOE): 6974 case htons(ETH_P_FCOE):
6904 case __constant_htons(ETH_P_FIP): 6975 case htons(ETH_P_FIP):
6905 adapter = netdev_priv(dev); 6976 adapter = netdev_priv(dev);
6906 6977
6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 6978 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
@@ -6962,7 +7033,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6962 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; 7033 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
6963 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 7034 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6964 /* else if it is a SW VLAN check the next protocol and store the tag */ 7035 /* else if it is a SW VLAN check the next protocol and store the tag */
6965 } else if (protocol == __constant_htons(ETH_P_8021Q)) { 7036 } else if (protocol == htons(ETH_P_8021Q)) {
6966 struct vlan_hdr *vhdr, _vhdr; 7037 struct vlan_hdr *vhdr, _vhdr;
6967 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); 7038 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
6968 if (!vhdr) 7039 if (!vhdr)
@@ -6974,9 +7045,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6974 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7045 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
6975 } 7046 }
6976 7047
6977 skb_tx_timestamp(skb); 7048 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
6978 7049 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
6979 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 7050 &adapter->state))) {
6980 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 7051 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6981 tx_flags |= IXGBE_TX_FLAGS_TSTAMP; 7052 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6982 7053
@@ -6986,6 +7057,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6986 schedule_work(&adapter->ptp_tx_work); 7057 schedule_work(&adapter->ptp_tx_work);
6987 } 7058 }
6988 7059
7060 skb_tx_timestamp(skb);
7061
6989#ifdef CONFIG_PCI_IOV 7062#ifdef CONFIG_PCI_IOV
6990 /* 7063 /*
6991 * Use the l2switch_enable flag - would be false if the DMA 7064 * Use the l2switch_enable flag - would be false if the DMA
@@ -7021,7 +7094,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7021 7094
7022#ifdef IXGBE_FCOE 7095#ifdef IXGBE_FCOE
7023 /* setup tx offload for FCoE */ 7096 /* setup tx offload for FCoE */
7024 if ((protocol == __constant_htons(ETH_P_FCOE)) && 7097 if ((protocol == htons(ETH_P_FCOE)) &&
7025 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { 7098 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
7026 tso = ixgbe_fso(tx_ring, first, &hdr_len); 7099 tso = ixgbe_fso(tx_ring, first, &hdr_len);
7027 if (tso < 0) 7100 if (tso < 0)
@@ -7143,7 +7216,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7143 7216
7144 switch (cmd) { 7217 switch (cmd) {
7145 case SIOCSHWTSTAMP: 7218 case SIOCSHWTSTAMP:
7146 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd); 7219 return ixgbe_ptp_set_ts_config(adapter, req);
7220 case SIOCGHWTSTAMP:
7221 return ixgbe_ptp_get_ts_config(adapter, req);
7147 default: 7222 default:
7148 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 7223 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7149 } 7224 }
@@ -7234,10 +7309,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7234 7309
7235 if (ring) { 7310 if (ring) {
7236 do { 7311 do {
7237 start = u64_stats_fetch_begin_bh(&ring->syncp); 7312 start = u64_stats_fetch_begin_irq(&ring->syncp);
7238 packets = ring->stats.packets; 7313 packets = ring->stats.packets;
7239 bytes = ring->stats.bytes; 7314 bytes = ring->stats.bytes;
7240 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 7315 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7241 stats->rx_packets += packets; 7316 stats->rx_packets += packets;
7242 stats->rx_bytes += bytes; 7317 stats->rx_bytes += bytes;
7243 } 7318 }
@@ -7250,10 +7325,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7250 7325
7251 if (ring) { 7326 if (ring) {
7252 do { 7327 do {
7253 start = u64_stats_fetch_begin_bh(&ring->syncp); 7328 start = u64_stats_fetch_begin_irq(&ring->syncp);
7254 packets = ring->stats.packets; 7329 packets = ring->stats.packets;
7255 bytes = ring->stats.bytes; 7330 bytes = ring->stats.bytes;
7256 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 7331 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7257 stats->tx_packets += packets; 7332 stats->tx_packets += packets;
7258 stats->tx_bytes += bytes; 7333 stats->tx_bytes += bytes;
7259 } 7334 }
@@ -7792,6 +7867,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7792 case IXGBE_DEV_ID_82599_SFP: 7867 case IXGBE_DEV_ID_82599_SFP:
7793 /* Only these subdevices could supports WOL */ 7868 /* Only these subdevices could supports WOL */
7794 switch (subdevice_id) { 7869 switch (subdevice_id) {
7870 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
7795 case IXGBE_SUBDEV_ID_82599_560FLR: 7871 case IXGBE_SUBDEV_ID_82599_560FLR:
7796 /* only support first port */ 7872 /* only support first port */
7797 if (hw->bus.func != 0) 7873 if (hw->bus.func != 0)
@@ -7969,10 +8045,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7969 if (err) 8045 if (err)
7970 goto err_sw_init; 8046 goto err_sw_init;
7971 8047
7972 /* Cache if MNG FW is up so we don't have to read the REG later */
7973 if (hw->mac.ops.mng_fw_enabled)
7974 hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
7975
7976 /* Make it possible the adapter to be woken up via WOL */ 8048 /* Make it possible the adapter to be woken up via WOL */
7977 switch (adapter->hw.mac.type) { 8049 switch (adapter->hw.mac.type) {
7978 case ixgbe_mac_82599EB: 8050 case ixgbe_mac_82599EB:
@@ -8223,7 +8295,7 @@ skip_sriov:
8223 ixgbe_dbg_adapter_init(adapter); 8295 ixgbe_dbg_adapter_init(adapter);
8224 8296
8225 /* Need link setup for MNG FW, else wait for IXGBE_UP */ 8297 /* Need link setup for MNG FW, else wait for IXGBE_UP */
8226 if (hw->mng_fw_enabled && hw->mac.ops.setup_link) 8298 if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
8227 hw->mac.ops.setup_link(hw, 8299 hw->mac.ops.setup_link(hw,
8228 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, 8300 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8229 true); 8301 true);
@@ -8244,7 +8316,8 @@ err_alloc_etherdev:
8244 pci_select_bars(pdev, IORESOURCE_MEM)); 8316 pci_select_bars(pdev, IORESOURCE_MEM));
8245err_pci_reg: 8317err_pci_reg:
8246err_dma: 8318err_dma:
8247 pci_disable_device(pdev); 8319 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
8320 pci_disable_device(pdev);
8248 return err; 8321 return err;
8249} 8322}
8250 8323
@@ -8313,7 +8386,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
8313 8386
8314 pci_disable_pcie_error_reporting(pdev); 8387 pci_disable_pcie_error_reporting(pdev);
8315 8388
8316 pci_disable_device(pdev); 8389 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
8390 pci_disable_device(pdev);
8317} 8391}
8318 8392
8319/** 8393/**
@@ -8331,6 +8405,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8331 struct net_device *netdev = adapter->netdev; 8405 struct net_device *netdev = adapter->netdev;
8332 8406
8333#ifdef CONFIG_PCI_IOV 8407#ifdef CONFIG_PCI_IOV
8408 struct ixgbe_hw *hw = &adapter->hw;
8334 struct pci_dev *bdev, *vfdev; 8409 struct pci_dev *bdev, *vfdev;
8335 u32 dw0, dw1, dw2, dw3; 8410 u32 dw0, dw1, dw2, dw3;
8336 int vf, pos; 8411 int vf, pos;
@@ -8351,10 +8426,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8351 if (!pos) 8426 if (!pos)
8352 goto skip_bad_vf_detection; 8427 goto skip_bad_vf_detection;
8353 8428
8354 pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); 8429 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
8355 pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); 8430 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
8356 pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); 8431 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
8357 pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); 8432 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
8433 if (ixgbe_removed(hw->hw_addr))
8434 goto skip_bad_vf_detection;
8358 8435
8359 req_id = dw1 >> 16; 8436 req_id = dw1 >> 16;
8360 /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ 8437 /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8417,14 +8494,20 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8417 8494
8418skip_bad_vf_detection: 8495skip_bad_vf_detection:
8419#endif /* CONFIG_PCI_IOV */ 8496#endif /* CONFIG_PCI_IOV */
8497 rtnl_lock();
8420 netif_device_detach(netdev); 8498 netif_device_detach(netdev);
8421 8499
8422 if (state == pci_channel_io_perm_failure) 8500 if (state == pci_channel_io_perm_failure) {
8501 rtnl_unlock();
8423 return PCI_ERS_RESULT_DISCONNECT; 8502 return PCI_ERS_RESULT_DISCONNECT;
8503 }
8424 8504
8425 if (netif_running(netdev)) 8505 if (netif_running(netdev))
8426 ixgbe_down(adapter); 8506 ixgbe_down(adapter);
8427 pci_disable_device(pdev); 8507
8508 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
8509 pci_disable_device(pdev);
8510 rtnl_unlock();
8428 8511
8429 /* Request a slot reset. */ 8512 /* Request a slot reset. */
8430 return PCI_ERS_RESULT_NEED_RESET; 8513 return PCI_ERS_RESULT_NEED_RESET;
@@ -8446,6 +8529,9 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
8446 e_err(probe, "Cannot re-enable PCI device after reset.\n"); 8529 e_err(probe, "Cannot re-enable PCI device after reset.\n");
8447 result = PCI_ERS_RESULT_DISCONNECT; 8530 result = PCI_ERS_RESULT_DISCONNECT;
8448 } else { 8531 } else {
8532 smp_mb__before_clear_bit();
8533 clear_bit(__IXGBE_DISABLED, &adapter->state);
8534 adapter->hw.hw_addr = adapter->io_addr;
8449 pci_set_master(pdev); 8535 pci_set_master(pdev);
8450 pci_restore_state(pdev); 8536 pci_restore_state(pdev);
8451 pci_save_state(pdev); 8537 pci_save_state(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc3101afd29f..f5c6af2b891b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index e44ff47659b5..a9b9ad69ed0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 132557c318f8..23f765263f12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -98,6 +99,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
98} 99}
99 100
100/** 101/**
102 * ixgbe_check_reset_blocked - check status of MNG FW veto bit
103 * @hw: pointer to the hardware structure
104 *
105 * This function checks the MMNGC.MNG_VETO bit to see if there are
106 * any constraints on link from manageability. For MAC's that don't
107 * have this bit just return false since the link can not be blocked
108 * via this method.
109 **/
110bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
111{
112 u32 mmngc;
113
114 /* If we don't have this bit, it can't be blocking */
115 if (hw->mac.type == ixgbe_mac_82598EB)
116 return false;
117
118 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
119 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
120 hw_dbg(hw, "MNG_VETO bit detected.\n");
121 return true;
122 }
123
124 return false;
125}
126
127/**
101 * ixgbe_get_phy_id - Get the phy type 128 * ixgbe_get_phy_id - Get the phy type
102 * @hw: pointer to hardware structure 129 * @hw: pointer to hardware structure
103 * 130 *
@@ -172,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
172 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 199 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
173 goto out; 200 goto out;
174 201
202 /* Blocked by MNG FW so bail */
203 if (ixgbe_check_reset_blocked(hw))
204 goto out;
205
175 /* 206 /*
176 * Perform soft PHY reset to the PHY_XS. 207 * Perform soft PHY reset to the PHY_XS.
177 * This will cause a soft reset to the PHY 208 * This will cause a soft reset to the PHY
@@ -476,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
476 autoneg_reg); 507 autoneg_reg);
477 } 508 }
478 509
510 /* Blocked by MNG FW so don't reset PHY */
511 if (ixgbe_check_reset_blocked(hw))
512 return status;
513
479 /* Restart PHY autonegotiation and wait for completion */ 514 /* Restart PHY autonegotiation and wait for completion */
480 hw->phy.ops.read_reg(hw, MDIO_CTRL1, 515 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
481 MDIO_MMD_AN, &autoneg_reg); 516 MDIO_MMD_AN, &autoneg_reg);
@@ -682,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
682 autoneg_reg); 717 autoneg_reg);
683 } 718 }
684 719
720 /* Blocked by MNG FW so don't reset PHY */
721 if (ixgbe_check_reset_blocked(hw))
722 return status;
723
685 /* Restart PHY autonegotiation and wait for completion */ 724 /* Restart PHY autonegotiation and wait for completion */
686 hw->phy.ops.read_reg(hw, MDIO_CTRL1, 725 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
687 MDIO_MMD_AN, &autoneg_reg); 726 MDIO_MMD_AN, &autoneg_reg);
@@ -759,6 +798,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
759 s32 ret_val = 0; 798 s32 ret_val = 0;
760 u32 i; 799 u32 i;
761 800
801 /* Blocked by MNG FW so bail */
802 if (ixgbe_check_reset_blocked(hw))
803 goto out;
804
762 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); 805 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
763 806
764 /* reset the PHY and poll for completion */ 807 /* reset the PHY and poll for completion */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index fffcbdd2bf0e..0bb047f751c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -65,9 +66,6 @@
65#define IXGBE_SFF_1GBASET_CAPABLE 0x8 66#define IXGBE_SFF_1GBASET_CAPABLE 0x8
66#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 67#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
67#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 68#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
68#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
69#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
70#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
71#define IXGBE_SFF_ADDRESSING_MODE 0x4 69#define IXGBE_SFF_ADDRESSING_MODE 0x4
72#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 70#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
73#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 71#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -79,7 +77,6 @@
79#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 77#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
80#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 78#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
81#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 79#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
82
83/* Flow control defines */ 80/* Flow control defines */
84#define IXGBE_TAF_SYM_PAUSE 0x400 81#define IXGBE_TAF_SYM_PAUSE 0x400
85#define IXGBE_TAF_ASM_PAUSE 0x800 82#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -131,6 +128,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
131s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
132 ixgbe_link_speed *speed, 129 ixgbe_link_speed *speed,
133 bool *autoneg); 130 bool *autoneg);
131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
134 132
135/* PHY specific */ 133/* PHY specific */
136s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a1a7d8..63515a6f67fa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -492,6 +493,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
492 493
493 dev_kfree_skb_any(adapter->ptp_tx_skb); 494 dev_kfree_skb_any(adapter->ptp_tx_skb);
494 adapter->ptp_tx_skb = NULL; 495 adapter->ptp_tx_skb = NULL;
496 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
495} 497}
496 498
497/** 499/**
@@ -511,13 +513,10 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
511 IXGBE_PTP_TX_TIMEOUT); 513 IXGBE_PTP_TX_TIMEOUT);
512 u32 tsynctxctl; 514 u32 tsynctxctl;
513 515
514 /* we have to have a valid skb */
515 if (!adapter->ptp_tx_skb)
516 return;
517
518 if (timeout) { 516 if (timeout) {
519 dev_kfree_skb_any(adapter->ptp_tx_skb); 517 dev_kfree_skb_any(adapter->ptp_tx_skb);
520 adapter->ptp_tx_skb = NULL; 518 adapter->ptp_tx_skb = NULL;
519 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
521 e_warn(drv, "clearing Tx Timestamp hang"); 520 e_warn(drv, "clearing Tx Timestamp hang");
522 return; 521 return;
523 } 522 }
@@ -576,14 +575,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
576 shhwtstamps->hwtstamp = ns_to_ktime(ns); 575 shhwtstamps->hwtstamp = ns_to_ktime(ns);
577} 576}
578 577
578int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
579{
580 struct hwtstamp_config *config = &adapter->tstamp_config;
581
582 return copy_to_user(ifr->ifr_data, config,
583 sizeof(*config)) ? -EFAULT : 0;
584}
585
579/** 586/**
580 * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping 587 * ixgbe_ptp_set_ts_config - control hardware time stamping
581 * @adapter: pointer to adapter struct 588 * @adapter: pointer to adapter struct
582 * @ifreq: ioctl data 589 * @ifreq: ioctl data
583 * @cmd: particular ioctl requested
584 * 590 *
585 * Outgoing time stamping can be enabled and disabled. Play nice and 591 * Outgoing time stamping can be enabled and disabled. Play nice and
586 * disable it when requested, although it shouldn't case any overhead 592 * disable it when requested, although it shouldn't cause any overhead
587 * when no packet needs it. At most one packet in the queue may be 593 * when no packet needs it. At most one packet in the queue may be
588 * marked for time stamping, otherwise it would be impossible to tell 594 * marked for time stamping, otherwise it would be impossible to tell
589 * for sure to which packet the hardware time stamp belongs. 595 * for sure to which packet the hardware time stamp belongs.
@@ -599,8 +605,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
599 * Event mode. This more accurately tells the user what the hardware is going 605 * Event mode. This more accurately tells the user what the hardware is going
600 * to do anyways. 606 * to do anyways.
601 */ 607 */
602int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 608int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
603 struct ifreq *ifr, int cmd)
604{ 609{
605 struct ixgbe_hw *hw = &adapter->hw; 610 struct ixgbe_hw *hw = &adapter->hw;
606 struct hwtstamp_config config; 611 struct hwtstamp_config config;
@@ -702,6 +707,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
702 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); 707 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
703 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 708 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
704 709
710 /* save these settings for future reference */
711 memcpy(&adapter->tstamp_config, &config,
712 sizeof(adapter->tstamp_config));
713
705 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 714 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
706 -EFAULT : 0; 715 -EFAULT : 0;
707} 716}
@@ -809,6 +818,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
809 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); 818 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
810 IXGBE_WRITE_FLUSH(hw); 819 IXGBE_WRITE_FLUSH(hw);
811 820
821 /* Reset the saved tstamp_config */
822 memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
823
812 ixgbe_ptp_start_cyclecounter(adapter); 824 ixgbe_ptp_start_cyclecounter(adapter);
813 825
814 spin_lock_irqsave(&adapter->tmreg_lock, flags); 826 spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -840,7 +852,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
840 852
841 switch (adapter->hw.mac.type) { 853 switch (adapter->hw.mac.type) {
842 case ixgbe_mac_X540: 854 case ixgbe_mac_X540:
843 snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); 855 snprintf(adapter->ptp_caps.name,
856 sizeof(adapter->ptp_caps.name),
857 "%s", netdev->name);
844 adapter->ptp_caps.owner = THIS_MODULE; 858 adapter->ptp_caps.owner = THIS_MODULE;
845 adapter->ptp_caps.max_adj = 250000000; 859 adapter->ptp_caps.max_adj = 250000000;
846 adapter->ptp_caps.n_alarm = 0; 860 adapter->ptp_caps.n_alarm = 0;
@@ -854,7 +868,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
854 adapter->ptp_caps.enable = ixgbe_ptp_enable; 868 adapter->ptp_caps.enable = ixgbe_ptp_enable;
855 break; 869 break;
856 case ixgbe_mac_82599EB: 870 case ixgbe_mac_82599EB:
857 snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); 871 snprintf(adapter->ptp_caps.name,
872 sizeof(adapter->ptp_caps.name),
873 "%s", netdev->name);
858 adapter->ptp_caps.owner = THIS_MODULE; 874 adapter->ptp_caps.owner = THIS_MODULE;
859 adapter->ptp_caps.max_adj = 250000000; 875 adapter->ptp_caps.max_adj = 250000000;
860 adapter->ptp_caps.n_alarm = 0; 876 adapter->ptp_caps.n_alarm = 0;
@@ -911,6 +927,7 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
911 if (adapter->ptp_tx_skb) { 927 if (adapter->ptp_tx_skb) {
912 dev_kfree_skb_any(adapter->ptp_tx_skb); 928 dev_kfree_skb_any(adapter->ptp_tx_skb);
913 adapter->ptp_tx_skb = NULL; 929 adapter->ptp_tx_skb = NULL;
930 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
914 } 931 }
915 932
916 if (adapter->ptp_clock) { 933 if (adapter->ptp_clock) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dff0977876f7..e6c68d396c99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 8bd29190514e..139eaddfb2ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index e74ae3682733..ef6df3d6437e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0d39cfc4a3bf..8a6ff2423f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -54,6 +55,7 @@
54#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a 55#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
55#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 56#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 57#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
58#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 59#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 60#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
59#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B 61#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
@@ -1609,6 +1611,9 @@ enum {
1609#define IXGBE_MACC_FS 0x00040000 1611#define IXGBE_MACC_FS 0x00040000
1610#define IXGBE_MAC_RX2TX_LPBK 0x00000002 1612#define IXGBE_MAC_RX2TX_LPBK 0x00000002
1611 1613
1614/* Veto Bit definiton */
1615#define IXGBE_MMNGC_MNG_VETO 0x00000001
1616
1612/* LINKS Bit Masks */ 1617/* LINKS Bit Masks */
1613#define IXGBE_LINKS_KX_AN_COMP 0x80000000 1618#define IXGBE_LINKS_KX_AN_COMP 0x80000000
1614#define IXGBE_LINKS_UP 0x40000000 1619#define IXGBE_LINKS_UP 0x40000000
@@ -1788,6 +1793,9 @@ enum {
1788#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ 1793#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
1789#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ 1794#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
1790 1795
1796#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
1797#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */
1798
1791#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS 1799#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
1792#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1800#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1793#endif 1801#endif
@@ -1853,8 +1861,19 @@ enum {
1853#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 1861#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
1854#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 1862#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
1855 1863
1864#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
1865#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
1866#define IXGBE_PCIDEVCTRL2_50_100us 0x1
1867#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
1868#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
1869#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
1870#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
1871#define IXGBE_PCIDEVCTRL2_1_2s 0xa
1872#define IXGBE_PCIDEVCTRL2_4_8s 0xd
1873#define IXGBE_PCIDEVCTRL2_17_34s 0xe
1874
1856/* Number of 100 microseconds we wait for PCI Express master disable */ 1875/* Number of 100 microseconds we wait for PCI Express master disable */
1857#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 1876#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
1858 1877
1859/* RAH */ 1878/* RAH */
1860#define IXGBE_RAH_VIND_MASK 0x003C0000 1879#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -2645,7 +2664,6 @@ enum ixgbe_sfp_type {
2645enum ixgbe_media_type { 2664enum ixgbe_media_type {
2646 ixgbe_media_type_unknown = 0, 2665 ixgbe_media_type_unknown = 0,
2647 ixgbe_media_type_fiber, 2666 ixgbe_media_type_fiber,
2648 ixgbe_media_type_fiber_fixed,
2649 ixgbe_media_type_fiber_qsfp, 2667 ixgbe_media_type_fiber_qsfp,
2650 ixgbe_media_type_fiber_lco, 2668 ixgbe_media_type_fiber_lco,
2651 ixgbe_media_type_copper, 2669 ixgbe_media_type_copper,
@@ -2858,6 +2876,8 @@ struct ixgbe_mac_operations {
2858 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2876 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2859 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); 2877 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
2860 void (*release_swfw_sync)(struct ixgbe_hw *, u16); 2878 void (*release_swfw_sync)(struct ixgbe_hw *, u16);
2879 s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
2880 s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
2861 2881
2862 /* Link */ 2882 /* Link */
2863 void (*disable_tx_laser)(struct ixgbe_hw *); 2883 void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2901,7 +2921,6 @@ struct ixgbe_mac_operations {
2901 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 2921 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
2902 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); 2922 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
2903 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); 2923 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
2904 bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
2905}; 2924};
2906 2925
2907struct ixgbe_phy_operations { 2926struct ixgbe_phy_operations {
@@ -2957,7 +2976,6 @@ struct ixgbe_mac_info {
2957 u32 max_tx_queues; 2976 u32 max_tx_queues;
2958 u32 max_rx_queues; 2977 u32 max_rx_queues;
2959 u32 orig_autoc; 2978 u32 orig_autoc;
2960 u32 cached_autoc;
2961 u32 orig_autoc2; 2979 u32 orig_autoc2;
2962 bool orig_link_settings_stored; 2980 bool orig_link_settings_stored;
2963 bool autotry_restart; 2981 bool autotry_restart;
@@ -3033,7 +3051,6 @@ struct ixgbe_hw {
3033 bool adapter_stopped; 3051 bool adapter_stopped;
3034 bool force_full_reset; 3052 bool force_full_reset;
3035 bool allow_unsupported_sfp; 3053 bool allow_unsupported_sfp;
3036 bool mng_fw_enabled;
3037 bool wol_enabled; 3054 bool wol_enabled;
3038}; 3055};
3039 3056
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 24b80a6cfca4..188a5974b85c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; 62 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; 63 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; 64 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
65 mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
64 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; 66 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
65 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; 67 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
66 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 68 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
187 goto out; 189 goto out;
188 190
189 ret_val = ixgbe_start_hw_gen2(hw); 191 ret_val = ixgbe_start_hw_gen2(hw);
190 hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
191out: 192out:
192 return ret_val; 193 return ret_val;
193} 194}
@@ -854,7 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
854 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 855 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
855 .get_thermal_sensor_data = NULL, 856 .get_thermal_sensor_data = NULL,
856 .init_thermal_sensor_thresh = NULL, 857 .init_thermal_sensor_thresh = NULL,
857 .mng_fw_enabled = NULL, 858 .prot_autoc_read = &prot_autoc_read_generic,
859 .prot_autoc_write = &prot_autoc_write_generic,
858}; 860};
859 861
860static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 862static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f68b78c732a8..1baecb60f065 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -530,41 +530,55 @@ static const u32 register_test_patterns[] = {
530 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF 530 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
531}; 531};
532 532
533#define REG_PATTERN_TEST(R, M, W) \ 533static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
534{ \ 534 int reg, u32 mask, u32 write)
535 u32 pat, val, before; \ 535{
536 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ 536 u32 pat, val, before;
537 before = readl(adapter->hw.hw_addr + R); \ 537
538 writel((register_test_patterns[pat] & W), \ 538 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
539 (adapter->hw.hw_addr + R)); \ 539 *data = 1;
540 val = readl(adapter->hw.hw_addr + R); \ 540 return true;
541 if (val != (register_test_patterns[pat] & W & M)) { \ 541 }
542 hw_dbg(&adapter->hw, \ 542 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
543 "pattern test reg %04X failed: got " \ 543 before = ixgbevf_read_reg(&adapter->hw, reg);
544 "0x%08X expected 0x%08X\n", \ 544 ixgbe_write_reg(&adapter->hw, reg,
545 R, val, (register_test_patterns[pat] & W & M)); \ 545 register_test_patterns[pat] & write);
546 *data = R; \ 546 val = ixgbevf_read_reg(&adapter->hw, reg);
547 writel(before, adapter->hw.hw_addr + R); \ 547 if (val != (register_test_patterns[pat] & write & mask)) {
548 return 1; \ 548 hw_dbg(&adapter->hw,
549 } \ 549 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
550 writel(before, adapter->hw.hw_addr + R); \ 550 reg, val,
551 } \ 551 register_test_patterns[pat] & write & mask);
552 *data = reg;
553 ixgbe_write_reg(&adapter->hw, reg, before);
554 return true;
555 }
556 ixgbe_write_reg(&adapter->hw, reg, before);
557 }
558 return false;
552} 559}
553 560
554#define REG_SET_AND_CHECK(R, M, W) \ 561static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
555{ \ 562 int reg, u32 mask, u32 write)
556 u32 val, before; \ 563{
557 before = readl(adapter->hw.hw_addr + R); \ 564 u32 val, before;
558 writel((W & M), (adapter->hw.hw_addr + R)); \ 565
559 val = readl(adapter->hw.hw_addr + R); \ 566 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
560 if ((W & M) != (val & M)) { \ 567 *data = 1;
561 pr_err("set/check reg %04X test failed: got 0x%08X expected " \ 568 return true;
562 "0x%08X\n", R, (val & M), (W & M)); \ 569 }
563 *data = R; \ 570 before = ixgbevf_read_reg(&adapter->hw, reg);
564 writel(before, (adapter->hw.hw_addr + R)); \ 571 ixgbe_write_reg(&adapter->hw, reg, write & mask);
565 return 1; \ 572 val = ixgbevf_read_reg(&adapter->hw, reg);
566 } \ 573 if ((write & mask) != (val & mask)) {
567 writel(before, (adapter->hw.hw_addr + R)); \ 574 pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
575 reg, (val & mask), write & mask);
576 *data = reg;
577 ixgbe_write_reg(&adapter->hw, reg, before);
578 return true;
579 }
580 ixgbe_write_reg(&adapter->hw, reg, before);
581 return false;
568} 582}
569 583
570static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) 584static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
@@ -572,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
572 const struct ixgbevf_reg_test *test; 586 const struct ixgbevf_reg_test *test;
573 u32 i; 587 u32 i;
574 588
589 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
590 dev_err(&adapter->pdev->dev,
591 "Adapter removed - register test blocked\n");
592 *data = 1;
593 return 1;
594 }
575 test = reg_test_vf; 595 test = reg_test_vf;
576 596
577 /* 597 /*
@@ -580,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
580 */ 600 */
581 while (test->reg) { 601 while (test->reg) {
582 for (i = 0; i < test->array_len; i++) { 602 for (i = 0; i < test->array_len; i++) {
603 bool b = false;
604
583 switch (test->test_type) { 605 switch (test->test_type) {
584 case PATTERN_TEST: 606 case PATTERN_TEST:
585 REG_PATTERN_TEST(test->reg + (i * 0x40), 607 b = reg_pattern_test(adapter, data,
586 test->mask, 608 test->reg + (i * 0x40),
587 test->write); 609 test->mask,
610 test->write);
588 break; 611 break;
589 case SET_READ_TEST: 612 case SET_READ_TEST:
590 REG_SET_AND_CHECK(test->reg + (i * 0x40), 613 b = reg_set_and_check(adapter, data,
591 test->mask, 614 test->reg + (i * 0x40),
592 test->write); 615 test->mask,
616 test->write);
593 break; 617 break;
594 case WRITE_NO_TEST: 618 case WRITE_NO_TEST:
595 writel(test->write, 619 ixgbe_write_reg(&adapter->hw,
596 (adapter->hw.hw_addr + test->reg) 620 test->reg + (i * 0x40),
597 + (i * 0x40)); 621 test->write);
598 break; 622 break;
599 case TABLE32_TEST: 623 case TABLE32_TEST:
600 REG_PATTERN_TEST(test->reg + (i * 4), 624 b = reg_pattern_test(adapter, data,
601 test->mask, 625 test->reg + (i * 4),
602 test->write); 626 test->mask,
627 test->write);
603 break; 628 break;
604 case TABLE64_TEST_LO: 629 case TABLE64_TEST_LO:
605 REG_PATTERN_TEST(test->reg + (i * 8), 630 b = reg_pattern_test(adapter, data,
606 test->mask, 631 test->reg + (i * 8),
607 test->write); 632 test->mask,
633 test->write);
608 break; 634 break;
609 case TABLE64_TEST_HI: 635 case TABLE64_TEST_HI:
610 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 636 b = reg_pattern_test(adapter, data,
611 test->mask, 637 test->reg + 4 + (i * 8),
612 test->write); 638 test->mask,
639 test->write);
613 break; 640 break;
614 } 641 }
642 if (b)
643 return 1;
615 } 644 }
616 test++; 645 test++;
617 } 646 }
@@ -626,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev,
626 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 655 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
627 bool if_running = netif_running(netdev); 656 bool if_running = netif_running(netdev);
628 657
658 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
659 dev_err(&adapter->pdev->dev,
660 "Adapter removed - test blocked\n");
661 data[0] = 1;
662 data[1] = 1;
663 eth_test->flags |= ETH_TEST_FL_FAILED;
664 return;
665 }
629 set_bit(__IXGBEVF_TESTING, &adapter->state); 666 set_bit(__IXGBEVF_TESTING, &adapter->state);
630 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 667 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
631 /* Offline tests */ 668 /* Offline tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 54829326bb09..e7e7d695816b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -315,6 +315,11 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
315 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; 315 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
316} 316}
317 317
318static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
319{
320 writel(value, ring->tail);
321}
322
318#define IXGBEVF_RX_DESC(R, i) \ 323#define IXGBEVF_RX_DESC(R, i) \
319 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) 324 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
320#define IXGBEVF_TX_DESC(R, i) \ 325#define IXGBEVF_TX_DESC(R, i) \
@@ -401,6 +406,7 @@ struct ixgbevf_adapter {
401 u64 bp_tx_missed; 406 u64 bp_tx_missed;
402#endif 407#endif
403 408
409 u8 __iomem *io_addr; /* Mainly for iounmap use */
404 u32 link_speed; 410 u32 link_speed;
405 bool link_up; 411 bool link_up;
406 412
@@ -412,7 +418,9 @@ struct ixgbevf_adapter {
412enum ixbgevf_state_t { 418enum ixbgevf_state_t {
413 __IXGBEVF_TESTING, 419 __IXGBEVF_TESTING,
414 __IXGBEVF_RESETTING, 420 __IXGBEVF_RESETTING,
415 __IXGBEVF_DOWN 421 __IXGBEVF_DOWN,
422 __IXGBEVF_DISABLED,
423 __IXGBEVF_REMOVING,
416}; 424};
417 425
418struct ixgbevf_cb { 426struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9df28985eba7..4ba139b2d25a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -99,6 +99,49 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
101 101
102static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
103{
104 struct ixgbevf_adapter *adapter = hw->back;
105
106 if (!hw->hw_addr)
107 return;
108 hw->hw_addr = NULL;
109 dev_err(&adapter->pdev->dev, "Adapter removed\n");
110 schedule_work(&adapter->watchdog_task);
111}
112
113static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
114{
115 u32 value;
116
117 /* The following check not only optimizes a bit by not
118 * performing a read on the status register when the
119 * register just read was a status register read that
120 * returned IXGBE_FAILED_READ_REG. It also blocks any
121 * potential recursion.
122 */
123 if (reg == IXGBE_VFSTATUS) {
124 ixgbevf_remove_adapter(hw);
125 return;
126 }
127 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
128 if (value == IXGBE_FAILED_READ_REG)
129 ixgbevf_remove_adapter(hw);
130}
131
132u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
133{
134 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
135 u32 value;
136
137 if (IXGBE_REMOVED(reg_addr))
138 return IXGBE_FAILED_READ_REG;
139 value = readl(reg_addr + reg);
140 if (unlikely(value == IXGBE_FAILED_READ_REG))
141 ixgbevf_check_remove(hw, reg);
142 return value;
143}
144
102static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, 145static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
103 u32 val) 146 u32 val)
104{ 147{
@@ -111,7 +154,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
111 * such as IA-64). 154 * such as IA-64).
112 */ 155 */
113 wmb(); 156 wmb();
114 writel(val, rx_ring->tail); 157 ixgbevf_write_tail(rx_ring, val);
115} 158}
116 159
117/** 160/**
@@ -516,7 +559,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
516 /* Workaround hardware that can't do proper VEPA multicast 559 /* Workaround hardware that can't do proper VEPA multicast
517 * source pruning. 560 * source pruning.
518 */ 561 */
519 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 562 if ((skb->pkt_type == PACKET_BROADCAST ||
563 skb->pkt_type == PACKET_MULTICAST) &&
520 ether_addr_equal(rx_ring->netdev->dev_addr, 564 ether_addr_equal(rx_ring->netdev->dev_addr,
521 eth_hdr(skb)->h_source)) { 565 eth_hdr(skb)->h_source)) {
522 dev_kfree_skb_irq(skb); 566 dev_kfree_skb_irq(skb);
@@ -607,7 +651,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
607 napi_complete(napi); 651 napi_complete(napi);
608 if (adapter->rx_itr_setting & 1) 652 if (adapter->rx_itr_setting & 1)
609 ixgbevf_set_itr(q_vector); 653 ixgbevf_set_itr(q_vector);
610 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 654 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
655 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
611 ixgbevf_irq_enable_queues(adapter, 656 ixgbevf_irq_enable_queues(adapter,
612 1 << q_vector->v_idx); 657 1 << q_vector->v_idx);
613 658
@@ -832,7 +877,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
832 877
833 hw->mac.get_link_status = 1; 878 hw->mac.get_link_status = 1;
834 879
835 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 880 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
881 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
836 mod_timer(&adapter->watchdog_timer, jiffies); 882 mod_timer(&adapter->watchdog_timer, jiffies);
837 883
838 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 884 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1136,7 +1182,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1136 /* reset head and tail pointers */ 1182 /* reset head and tail pointers */
1137 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); 1183 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1138 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); 1184 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1139 ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx); 1185 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1140 1186
1141 /* reset ntu and ntc to place SW in sync with hardwdare */ 1187 /* reset ntu and ntc to place SW in sync with hardwdare */
1142 ring->next_to_clean = 0; 1188 ring->next_to_clean = 0;
@@ -1256,6 +1302,8 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1256 u32 rxdctl; 1302 u32 rxdctl;
1257 u8 reg_idx = ring->reg_idx; 1303 u8 reg_idx = ring->reg_idx;
1258 1304
1305 if (IXGBE_REMOVED(hw->hw_addr))
1306 return;
1259 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1307 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1260 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1308 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1261 1309
@@ -1281,6 +1329,8 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1281 u32 rxdctl; 1329 u32 rxdctl;
1282 u8 reg_idx = ring->reg_idx; 1330 u8 reg_idx = ring->reg_idx;
1283 1331
1332 if (IXGBE_REMOVED(hw->hw_addr))
1333 return;
1284 do { 1334 do {
1285 usleep_range(1000, 2000); 1335 usleep_range(1000, 2000);
1286 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1336 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1315,7 +1365,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1315 /* reset head and tail pointers */ 1365 /* reset head and tail pointers */
1316 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); 1366 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1317 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); 1367 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1318 ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx); 1368 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1319 1369
1320 /* reset ntu and ntc to place SW in sync with hardwdare */ 1370 /* reset ntu and ntc to place SW in sync with hardwdare */
1321 ring->next_to_clean = 0; 1371 ring->next_to_clean = 0;
@@ -1617,6 +1667,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1617 1667
1618 spin_unlock_bh(&adapter->mbx_lock); 1668 spin_unlock_bh(&adapter->mbx_lock);
1619 1669
1670 smp_mb__before_clear_bit();
1620 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1671 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1621 ixgbevf_napi_enable_all(adapter); 1672 ixgbevf_napi_enable_all(adapter);
1622 1673
@@ -1741,7 +1792,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
1741 int i; 1792 int i;
1742 1793
1743 /* signal that we are down to the interrupt handler */ 1794 /* signal that we are down to the interrupt handler */
1744 set_bit(__IXGBEVF_DOWN, &adapter->state); 1795 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
1796 return; /* do nothing if already down */
1745 1797
1746 /* disable all enabled rx queues */ 1798 /* disable all enabled rx queues */
1747 for (i = 0; i < adapter->num_rx_queues; i++) 1799 for (i = 0; i < adapter->num_rx_queues; i++)
@@ -1817,7 +1869,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1817static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1869static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1818 int vectors) 1870 int vectors)
1819{ 1871{
1820 int err = 0;
1821 int vector_threshold; 1872 int vector_threshold;
1822 1873
1823 /* We'll want at least 2 (vector_threshold): 1874 /* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1882,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1831 * Right now, we simply care about how many we'll get; we'll 1882 * Right now, we simply care about how many we'll get; we'll
1832 * set them up later while requesting irq's. 1883 * set them up later while requesting irq's.
1833 */ 1884 */
1834 while (vectors >= vector_threshold) { 1885 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1835 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1886 vector_threshold, vectors);
1836 vectors);
1837 if (!err || err < 0) /* Success or a nasty failure. */
1838 break;
1839 else /* err == number of vectors we should try again with */
1840 vectors = err;
1841 }
1842
1843 if (vectors < vector_threshold)
1844 err = -ENOMEM;
1845 1887
1846 if (err) { 1888 if (vectors < 0) {
1847 dev_err(&adapter->pdev->dev, 1889 dev_err(&adapter->pdev->dev,
1848 "Unable to allocate MSI-X interrupts\n"); 1890 "Unable to allocate MSI-X interrupts\n");
1849 kfree(adapter->msix_entries); 1891 kfree(adapter->msix_entries);
1850 adapter->msix_entries = NULL; 1892 adapter->msix_entries = NULL;
1851 } else { 1893 return vectors;
1852 /*
1853 * Adjust for only the vectors we'll use, which is minimum
1854 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1855 * vectors we were allocated.
1856 */
1857 adapter->num_msix_vectors = vectors;
1858 } 1894 }
1859 1895
1860 return err; 1896 /* Adjust for only the vectors we'll use, which is minimum
1897 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1898 * vectors we were allocated.
1899 */
1900 adapter->num_msix_vectors = vectors;
1901
1902 return 0;
1861} 1903}
1862 1904
1863/** 1905/**
@@ -2338,6 +2380,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
2338 2380
2339 /* If we're already down or resetting, just bail */ 2381 /* If we're already down or resetting, just bail */
2340 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2382 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2383 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2341 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2384 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2342 return; 2385 return;
2343 2386
@@ -2361,6 +2404,14 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2361 bool link_up = adapter->link_up; 2404 bool link_up = adapter->link_up;
2362 s32 need_reset; 2405 s32 need_reset;
2363 2406
2407 if (IXGBE_REMOVED(hw->hw_addr)) {
2408 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2409 rtnl_lock();
2410 ixgbevf_down(adapter);
2411 rtnl_unlock();
2412 }
2413 return;
2414 }
2364 ixgbevf_queue_reset_subtask(adapter); 2415 ixgbevf_queue_reset_subtask(adapter);
2365 2416
2366 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2417 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2422,7 +2473,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2422 2473
2423pf_has_reset: 2474pf_has_reset:
2424 /* Reset the timer */ 2475 /* Reset the timer */
2425 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2476 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2477 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2426 mod_timer(&adapter->watchdog_timer, 2478 mod_timer(&adapter->watchdog_timer,
2427 round_jiffies(jiffies + (2 * HZ))); 2479 round_jiffies(jiffies + (2 * HZ)));
2428 2480
@@ -2787,6 +2839,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2787 u32 vlan_macip_lens, type_tucmd; 2839 u32 vlan_macip_lens, type_tucmd;
2788 u32 mss_l4len_idx, l4len; 2840 u32 mss_l4len_idx, l4len;
2789 2841
2842 if (skb->ip_summed != CHECKSUM_PARTIAL)
2843 return 0;
2844
2790 if (!skb_is_gso(skb)) 2845 if (!skb_is_gso(skb))
2791 return 0; 2846 return 0;
2792 2847
@@ -2857,12 +2912,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2857 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2912 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2858 u8 l4_hdr = 0; 2913 u8 l4_hdr = 0;
2859 switch (skb->protocol) { 2914 switch (skb->protocol) {
2860 case __constant_htons(ETH_P_IP): 2915 case htons(ETH_P_IP):
2861 vlan_macip_lens |= skb_network_header_len(skb); 2916 vlan_macip_lens |= skb_network_header_len(skb);
2862 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2917 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2863 l4_hdr = ip_hdr(skb)->protocol; 2918 l4_hdr = ip_hdr(skb)->protocol;
2864 break; 2919 break;
2865 case __constant_htons(ETH_P_IPV6): 2920 case htons(ETH_P_IPV6):
2866 vlan_macip_lens |= skb_network_header_len(skb); 2921 vlan_macip_lens |= skb_network_header_len(skb);
2867 l4_hdr = ipv6_hdr(skb)->nexthdr; 2922 l4_hdr = ipv6_hdr(skb)->nexthdr;
2868 break; 2923 break;
@@ -3060,7 +3115,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3060 tx_ring->next_to_use = i; 3115 tx_ring->next_to_use = i;
3061 3116
3062 /* notify HW of packet */ 3117 /* notify HW of packet */
3063 writel(i, tx_ring->tail); 3118 ixgbevf_write_tail(tx_ring, i);
3064 3119
3065 return; 3120 return;
3066dma_error: 3121dma_error:
@@ -3165,7 +3220,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3165 tso = ixgbevf_tso(tx_ring, first, &hdr_len); 3220 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3166 if (tso < 0) 3221 if (tso < 0)
3167 goto out_drop; 3222 goto out_drop;
3168 else 3223 else if (!tso)
3169 ixgbevf_tx_csum(tx_ring, first); 3224 ixgbevf_tx_csum(tx_ring, first);
3170 3225
3171 ixgbevf_tx_map(tx_ring, first, hdr_len); 3226 ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -3274,7 +3329,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3274 return retval; 3329 return retval;
3275 3330
3276#endif 3331#endif
3277 pci_disable_device(pdev); 3332 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3333 pci_disable_device(pdev);
3278 3334
3279 return 0; 3335 return 0;
3280} 3336}
@@ -3286,7 +3342,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
3286 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3342 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3287 u32 err; 3343 u32 err;
3288 3344
3289 pci_set_power_state(pdev, PCI_D0);
3290 pci_restore_state(pdev); 3345 pci_restore_state(pdev);
3291 /* 3346 /*
3292 * pci_restore_state clears dev->state_saved so call 3347 * pci_restore_state clears dev->state_saved so call
@@ -3299,6 +3354,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
3299 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3354 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3300 return err; 3355 return err;
3301 } 3356 }
3357 smp_mb__before_clear_bit();
3358 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3302 pci_set_master(pdev); 3359 pci_set_master(pdev);
3303 3360
3304 ixgbevf_reset(adapter); 3361 ixgbevf_reset(adapter);
@@ -3344,10 +3401,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3344 for (i = 0; i < adapter->num_rx_queues; i++) { 3401 for (i = 0; i < adapter->num_rx_queues; i++) {
3345 ring = adapter->rx_ring[i]; 3402 ring = adapter->rx_ring[i];
3346 do { 3403 do {
3347 start = u64_stats_fetch_begin_bh(&ring->syncp); 3404 start = u64_stats_fetch_begin_irq(&ring->syncp);
3348 bytes = ring->stats.bytes; 3405 bytes = ring->stats.bytes;
3349 packets = ring->stats.packets; 3406 packets = ring->stats.packets;
3350 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3407 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3351 stats->rx_bytes += bytes; 3408 stats->rx_bytes += bytes;
3352 stats->rx_packets += packets; 3409 stats->rx_packets += packets;
3353 } 3410 }
@@ -3355,10 +3412,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3355 for (i = 0; i < adapter->num_tx_queues; i++) { 3412 for (i = 0; i < adapter->num_tx_queues; i++) {
3356 ring = adapter->tx_ring[i]; 3413 ring = adapter->tx_ring[i];
3357 do { 3414 do {
3358 start = u64_stats_fetch_begin_bh(&ring->syncp); 3415 start = u64_stats_fetch_begin_irq(&ring->syncp);
3359 bytes = ring->stats.bytes; 3416 bytes = ring->stats.bytes;
3360 packets = ring->stats.packets; 3417 packets = ring->stats.packets;
3361 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3418 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3362 stats->tx_bytes += bytes; 3419 stats->tx_bytes += bytes;
3363 stats->tx_packets += packets; 3420 stats->tx_packets += packets;
3364 } 3421 }
@@ -3460,6 +3517,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3460 3517
3461 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3518 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3462 pci_resource_len(pdev, 0)); 3519 pci_resource_len(pdev, 0));
3520 adapter->io_addr = hw->hw_addr;
3463 if (!hw->hw_addr) { 3521 if (!hw->hw_addr) {
3464 err = -EIO; 3522 err = -EIO;
3465 goto err_ioremap; 3523 goto err_ioremap;
@@ -3545,14 +3603,15 @@ err_register:
3545 ixgbevf_clear_interrupt_scheme(adapter); 3603 ixgbevf_clear_interrupt_scheme(adapter);
3546err_sw_init: 3604err_sw_init:
3547 ixgbevf_reset_interrupt_capability(adapter); 3605 ixgbevf_reset_interrupt_capability(adapter);
3548 iounmap(hw->hw_addr); 3606 iounmap(adapter->io_addr);
3549err_ioremap: 3607err_ioremap:
3550 free_netdev(netdev); 3608 free_netdev(netdev);
3551err_alloc_etherdev: 3609err_alloc_etherdev:
3552 pci_release_regions(pdev); 3610 pci_release_regions(pdev);
3553err_pci_reg: 3611err_pci_reg:
3554err_dma: 3612err_dma:
3555 pci_disable_device(pdev); 3613 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3614 pci_disable_device(pdev);
3556 return err; 3615 return err;
3557} 3616}
3558 3617
@@ -3570,7 +3629,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3570 struct net_device *netdev = pci_get_drvdata(pdev); 3629 struct net_device *netdev = pci_get_drvdata(pdev);
3571 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3630 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3572 3631
3573 set_bit(__IXGBEVF_DOWN, &adapter->state); 3632 set_bit(__IXGBEVF_REMOVING, &adapter->state);
3574 3633
3575 del_timer_sync(&adapter->watchdog_timer); 3634 del_timer_sync(&adapter->watchdog_timer);
3576 3635
@@ -3583,14 +3642,15 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3583 ixgbevf_clear_interrupt_scheme(adapter); 3642 ixgbevf_clear_interrupt_scheme(adapter);
3584 ixgbevf_reset_interrupt_capability(adapter); 3643 ixgbevf_reset_interrupt_capability(adapter);
3585 3644
3586 iounmap(adapter->hw.hw_addr); 3645 iounmap(adapter->io_addr);
3587 pci_release_regions(pdev); 3646 pci_release_regions(pdev);
3588 3647
3589 hw_dbg(&adapter->hw, "Remove complete\n"); 3648 hw_dbg(&adapter->hw, "Remove complete\n");
3590 3649
3591 free_netdev(netdev); 3650 free_netdev(netdev);
3592 3651
3593 pci_disable_device(pdev); 3652 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3653 pci_disable_device(pdev);
3594} 3654}
3595 3655
3596/** 3656/**
@@ -3607,15 +3667,20 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3607 struct net_device *netdev = pci_get_drvdata(pdev); 3667 struct net_device *netdev = pci_get_drvdata(pdev);
3608 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3668 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3609 3669
3670 rtnl_lock();
3610 netif_device_detach(netdev); 3671 netif_device_detach(netdev);
3611 3672
3612 if (state == pci_channel_io_perm_failure) 3673 if (state == pci_channel_io_perm_failure) {
3674 rtnl_unlock();
3613 return PCI_ERS_RESULT_DISCONNECT; 3675 return PCI_ERS_RESULT_DISCONNECT;
3676 }
3614 3677
3615 if (netif_running(netdev)) 3678 if (netif_running(netdev))
3616 ixgbevf_down(adapter); 3679 ixgbevf_down(adapter);
3617 3680
3618 pci_disable_device(pdev); 3681 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3682 pci_disable_device(pdev);
3683 rtnl_unlock();
3619 3684
3620 /* Request a slot slot reset. */ 3685 /* Request a slot slot reset. */
3621 return PCI_ERS_RESULT_NEED_RESET; 3686 return PCI_ERS_RESULT_NEED_RESET;
@@ -3639,6 +3704,8 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3639 return PCI_ERS_RESULT_DISCONNECT; 3704 return PCI_ERS_RESULT_DISCONNECT;
3640 } 3705 }
3641 3706
3707 smp_mb__before_clear_bit();
3708 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3642 pci_set_master(pdev); 3709 pci_set_master(pdev);
3643 3710
3644 ixgbevf_reset(adapter); 3711 ixgbevf_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index debd8c0e1f28..09dd8f698bea 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -70,16 +70,6 @@
70#define IXGBE_VFGOTC_MSB 0x02024 70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034 71#define IXGBE_VFMPRC 0x01034
72 72
73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
74
75#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
76
77#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
78 writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
79
80#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
81 readl((a)->hw_addr + (reg) + ((offset) << 2)))
82
83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) 73#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
84 74
85#endif /* _IXGBEVF_REGS_H_ */ 75#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 7b1f502d1716..3061d1890471 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -172,6 +172,37 @@ struct ixgbevf_info {
172 const struct ixgbe_mac_operations *mac_ops; 172 const struct ixgbe_mac_operations *mac_ops;
173}; 173};
174 174
175#define IXGBE_FAILED_READ_REG 0xffffffffU
176
177#define IXGBE_REMOVED(a) unlikely(!(a))
178
179static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
180{
181 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
182
183 if (IXGBE_REMOVED(reg_addr))
184 return;
185 writel(value, reg_addr + reg);
186}
187#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
188
189u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
190#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
191
192static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
193 u32 offset, u32 value)
194{
195 ixgbe_write_reg(hw, reg + (offset << 2), value);
196}
197#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
198
199static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
200 u32 offset)
201{
202 return ixgbevf_read_reg(hw, reg + (offset << 2));
203}
204#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
205
175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); 206void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); 207int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
177int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, 208int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f5685c0d0579..b0c6050479eb 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2054,19 +2054,6 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2054} 2054}
2055 2055
2056static int 2056static int
2057jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
2058{
2059 if (unlikely(skb_shinfo(skb)->gso_size &&
2060 skb_header_cloned(skb) &&
2061 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
2062 dev_kfree_skb(skb);
2063 return -1;
2064 }
2065
2066 return 0;
2067}
2068
2069static int
2070jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 2057jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2071{ 2058{
2072 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); 2059 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
@@ -2225,7 +2212,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2225 struct jme_adapter *jme = netdev_priv(netdev); 2212 struct jme_adapter *jme = netdev_priv(netdev);
2226 int idx; 2213 int idx;
2227 2214
2228 if (unlikely(jme_expand_header(jme, skb))) { 2215 if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
2216 dev_kfree_skb_any(skb);
2229 ++(NET_STAT(jme).tx_dropped); 2217 ++(NET_STAT(jme).tx_dropped);
2230 return NETDEV_TX_OK; 2218 return NETDEV_TX_OK;
2231 } 2219 }
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a2565ce22b7c..b7b8d74c22d9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
730 unlikely(tag_bytes & ~12)) { 730 unlikely(tag_bytes & ~12)) {
731 if (skb_checksum_help(skb) == 0) 731 if (skb_checksum_help(skb) == 0)
732 goto no_csum; 732 goto no_csum;
733 kfree_skb(skb); 733 dev_kfree_skb_any(skb);
734 return 1; 734 return 1;
735 } 735 }
736 736
@@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
820 if (net_ratelimit()) 820 if (net_ratelimit())
821 netdev_err(dev, "tx queue full?!\n"); 821 netdev_err(dev, "tx queue full?!\n");
822 kfree_skb(skb); 822 dev_kfree_skb_any(skb);
823 return NETDEV_TX_OK; 823 return NETDEV_TX_OK;
824 } 824 }
825 825
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index fd409d76b811..b161a525fc5b 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -167,11 +167,6 @@ out:
167 return ret; 167 return ret;
168} 168}
169 169
170static int orion_mdio_reset(struct mii_bus *bus)
171{
172 return 0;
173}
174
175static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id) 170static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
176{ 171{
177 struct orion_mdio_dev *dev = dev_id; 172 struct orion_mdio_dev *dev = dev_id;
@@ -209,7 +204,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
209 bus->name = "orion_mdio_bus"; 204 bus->name = "orion_mdio_bus";
210 bus->read = orion_mdio_read; 205 bus->read = orion_mdio_read;
211 bus->write = orion_mdio_write; 206 bus->write = orion_mdio_write;
212 bus->reset = orion_mdio_reset;
213 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", 207 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
214 dev_name(&pdev->dev)); 208 dev_name(&pdev->dev));
215 bus->parent = &pdev->dev; 209 bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8d76fca7fde7..d04b1c3c9b85 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -510,12 +510,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
510 510
511 cpu_stats = per_cpu_ptr(pp->stats, cpu); 511 cpu_stats = per_cpu_ptr(pp->stats, cpu);
512 do { 512 do {
513 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); 513 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
514 rx_packets = cpu_stats->rx_packets; 514 rx_packets = cpu_stats->rx_packets;
515 rx_bytes = cpu_stats->rx_bytes; 515 rx_bytes = cpu_stats->rx_bytes;
516 tx_packets = cpu_stats->tx_packets; 516 tx_packets = cpu_stats->tx_packets;
517 tx_bytes = cpu_stats->tx_bytes; 517 tx_bytes = cpu_stats->tx_bytes;
518 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); 518 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
519 519
520 stats->rx_packets += rx_packets; 520 stats->rx_packets += rx_packets;
521 stats->rx_bytes += rx_bytes; 521 stats->rx_bytes += rx_bytes;
@@ -2761,7 +2761,6 @@ static int mvneta_probe(struct platform_device *pdev)
2761 const char *mac_from; 2761 const char *mac_from;
2762 int phy_mode; 2762 int phy_mode;
2763 int err; 2763 int err;
2764 int cpu;
2765 2764
2766 /* Our multiqueue support is not complete, so for now, only 2765 /* Our multiqueue support is not complete, so for now, only
2767 * allow the usage of the first RX queue 2766 * allow the usage of the first RX queue
@@ -2816,30 +2815,19 @@ static int mvneta_probe(struct platform_device *pdev)
2816 clk_prepare_enable(pp->clk); 2815 clk_prepare_enable(pp->clk);
2817 2816
2818 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2817 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2819 if (!res) {
2820 err = -ENODEV;
2821 goto err_clk;
2822 }
2823
2824 pp->base = devm_ioremap_resource(&pdev->dev, res); 2818 pp->base = devm_ioremap_resource(&pdev->dev, res);
2825 if (pp->base == NULL) { 2819 if (IS_ERR(pp->base)) {
2826 err = PTR_ERR(pp->base); 2820 err = PTR_ERR(pp->base);
2827 goto err_clk; 2821 goto err_clk;
2828 } 2822 }
2829 2823
2830 /* Alloc per-cpu stats */ 2824 /* Alloc per-cpu stats */
2831 pp->stats = alloc_percpu(struct mvneta_pcpu_stats); 2825 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
2832 if (!pp->stats) { 2826 if (!pp->stats) {
2833 err = -ENOMEM; 2827 err = -ENOMEM;
2834 goto err_clk; 2828 goto err_clk;
2835 } 2829 }
2836 2830
2837 for_each_possible_cpu(cpu) {
2838 struct mvneta_pcpu_stats *stats;
2839 stats = per_cpu_ptr(pp->stats, cpu);
2840 u64_stats_init(&stats->syncp);
2841 }
2842
2843 dt_mac_addr = of_get_mac_address(dn); 2831 dt_mac_addr = of_get_mac_address(dn);
2844 if (dt_mac_addr) { 2832 if (dt_mac_addr) {
2845 mac_from = "device tree"; 2833 mac_from = "device tree";
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 597846193869..7f81ae66cc89 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2845,7 +2845,7 @@ mapping_unwind:
2845mapping_error: 2845mapping_error:
2846 if (net_ratelimit()) 2846 if (net_ratelimit())
2847 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2847 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2848 dev_kfree_skb(skb); 2848 dev_kfree_skb_any(skb);
2849 return NETDEV_TX_OK; 2849 return NETDEV_TX_OK;
2850} 2850}
2851 2851
@@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev)
3172 pkts_compl++; 3172 pkts_compl++;
3173 bytes_compl += e->skb->len; 3173 bytes_compl += e->skb->len;
3174 3174
3175 dev_kfree_skb(e->skb); 3175 dev_consume_skb_any(e->skb);
3176 } 3176 }
3177 } 3177 }
3178 netdev_completed_queue(dev, pkts_compl, bytes_compl); 3178 netdev_completed_queue(dev, pkts_compl, bytes_compl);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 55a37ae11440..b81106451a0a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -44,6 +44,8 @@
44#include <linux/prefetch.h> 44#include <linux/prefetch.h>
45#include <linux/debugfs.h> 45#include <linux/debugfs.h>
46#include <linux/mii.h> 46#include <linux/mii.h>
47#include <linux/of_device.h>
48#include <linux/of_net.h>
47 49
48#include <asm/irq.h> 50#include <asm/irq.h>
49 51
@@ -2000,7 +2002,7 @@ mapping_unwind:
2000mapping_error: 2002mapping_error:
2001 if (net_ratelimit()) 2003 if (net_ratelimit())
2002 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2004 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2003 dev_kfree_skb(skb); 2005 dev_kfree_skb_any(skb);
2004 return NETDEV_TX_OK; 2006 return NETDEV_TX_OK;
2005} 2007}
2006 2008
@@ -2733,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2733 unsigned int total_bytes[2] = { 0 }; 2735 unsigned int total_bytes[2] = { 0 };
2734 unsigned int total_packets[2] = { 0 }; 2736 unsigned int total_packets[2] = { 0 };
2735 2737
2738 if (to_do <= 0)
2739 return work_done;
2740
2736 rmb(); 2741 rmb();
2737 do { 2742 do {
2738 struct sky2_port *sky2; 2743 struct sky2_port *sky2;
@@ -3906,19 +3911,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
3906 u64 _bytes, _packets; 3911 u64 _bytes, _packets;
3907 3912
3908 do { 3913 do {
3909 start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); 3914 start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
3910 _bytes = sky2->rx_stats.bytes; 3915 _bytes = sky2->rx_stats.bytes;
3911 _packets = sky2->rx_stats.packets; 3916 _packets = sky2->rx_stats.packets;
3912 } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); 3917 } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
3913 3918
3914 stats->rx_packets = _packets; 3919 stats->rx_packets = _packets;
3915 stats->rx_bytes = _bytes; 3920 stats->rx_bytes = _bytes;
3916 3921
3917 do { 3922 do {
3918 start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); 3923 start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
3919 _bytes = sky2->tx_stats.bytes; 3924 _bytes = sky2->tx_stats.bytes;
3920 _packets = sky2->tx_stats.packets; 3925 _packets = sky2->tx_stats.packets;
3921 } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); 3926 } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
3922 3927
3923 stats->tx_packets = _packets; 3928 stats->tx_packets = _packets;
3924 stats->tx_bytes = _bytes; 3929 stats->tx_bytes = _bytes;
@@ -4748,6 +4753,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4748{ 4753{
4749 struct sky2_port *sky2; 4754 struct sky2_port *sky2;
4750 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 4755 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
4756 const void *iap;
4751 4757
4752 if (!dev) 4758 if (!dev)
4753 return NULL; 4759 return NULL;
@@ -4805,8 +4811,16 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4805 4811
4806 dev->features |= dev->hw_features; 4812 dev->features |= dev->hw_features;
4807 4813
4808 /* read the mac address */ 4814 /* try to get mac address in the following order:
4809 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); 4815 * 1) from device tree data
4816 * 2) from internal registers set by bootloader
4817 */
4818 iap = of_get_mac_address(hw->pdev->dev.of_node);
4819 if (iap)
4820 memcpy(dev->dev_addr, iap, ETH_ALEN);
4821 else
4822 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
4823 ETH_ALEN);
4810 4824
4811 return dev; 4825 return dev;
4812} 4826}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 563495d8975a..1486ce902a56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config MLX4_EN 5config MLX4_EN
6 tristate "Mellanox Technologies 10Gbit Ethernet support" 6 tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
7 depends on PCI 7 depends on PCI
8 select MLX4_CORE 8 select MLX4_CORE
9 select PTP_1588_CLOCK 9 select PTP_1588_CLOCK
@@ -23,6 +23,13 @@ config MLX4_EN_DCB
23 23
24 If unsure, set to Y 24 If unsure, set to Y
25 25
26config MLX4_EN_VXLAN
27 bool "VXLAN offloads Support"
28 default y
29 depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
30 ---help---
31 Say Y here if you want to use VXLAN offloads in the driver.
32
26config MLX4_CORE 33config MLX4_CORE
27 tristate 34 tristate
28 depends on PCI 35 depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0d02fba94536..78099eab7673 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,16 +800,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
801} 801}
802 802
803static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, 803static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_vhcr *vhcr,
805 struct mlx4_cmd_mailbox *inbox,
806 struct mlx4_cmd_mailbox *outbox,
807 struct mlx4_cmd_info *cmd)
808{
809 return -EPERM;
810}
811
812static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
813 struct mlx4_vhcr *vhcr, 804 struct mlx4_vhcr *vhcr,
814 struct mlx4_cmd_mailbox *inbox, 805 struct mlx4_cmd_mailbox *inbox,
815 struct mlx4_cmd_mailbox *outbox, 806 struct mlx4_cmd_mailbox *outbox,
@@ -964,6 +955,15 @@ static struct mlx4_cmd_info cmd_info[] = {
964 .wrapper = NULL 955 .wrapper = NULL
965 }, 956 },
966 { 957 {
958 .opcode = MLX4_CMD_CONFIG_DEV,
959 .has_inbox = false,
960 .has_outbox = false,
961 .out_is_imm = false,
962 .encode_slave_id = false,
963 .verify = NULL,
964 .wrapper = mlx4_CMD_EPERM_wrapper
965 },
966 {
967 .opcode = MLX4_CMD_ALLOC_RES, 967 .opcode = MLX4_CMD_ALLOC_RES,
968 .has_inbox = false, 968 .has_inbox = false,
969 .has_outbox = false, 969 .has_outbox = false,
@@ -1258,7 +1258,7 @@ static struct mlx4_cmd_info cmd_info[] = {
1258 .out_is_imm = false, 1258 .out_is_imm = false,
1259 .encode_slave_id = false, 1259 .encode_slave_id = false,
1260 .verify = NULL, 1260 .verify = NULL,
1261 .wrapper = MLX4_CMD_UPDATE_QP_wrapper 1261 .wrapper = mlx4_CMD_EPERM_wrapper
1262 }, 1262 },
1263 { 1263 {
1264 .opcode = MLX4_CMD_GET_OP_REQ, 1264 .opcode = MLX4_CMD_GET_OP_REQ,
@@ -1267,7 +1267,7 @@ static struct mlx4_cmd_info cmd_info[] = {
1267 .out_is_imm = false, 1267 .out_is_imm = false,
1268 .encode_slave_id = false, 1268 .encode_slave_id = false,
1269 .verify = NULL, 1269 .verify = NULL,
1270 .wrapper = MLX4_CMD_GET_OP_REQ_wrapper, 1270 .wrapper = mlx4_CMD_EPERM_wrapper,
1271 }, 1271 },
1272 { 1272 {
1273 .opcode = MLX4_CMD_CONF_SPECIAL_QP, 1273 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
@@ -1378,7 +1378,7 @@ static struct mlx4_cmd_info cmd_info[] = {
1378 .out_is_imm = false, 1378 .out_is_imm = false,
1379 .encode_slave_id = false, 1379 .encode_slave_id = false,
1380 .verify = NULL, 1380 .verify = NULL,
1381 .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper 1381 .wrapper = mlx4_CMD_EPERM_wrapper
1382 }, 1382 },
1383}; 1383};
1384 1384
@@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1643 int port, err; 1643 int port, err;
1644 struct mlx4_vport_state *vp_admin; 1644 struct mlx4_vport_state *vp_admin;
1645 struct mlx4_vport_oper_state *vp_oper; 1645 struct mlx4_vport_oper_state *vp_oper;
1646 1646 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1647 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 1647 &priv->dev, slave);
1648 int min_port = find_first_bit(actv_ports.ports,
1649 priv->dev.caps.num_ports) + 1;
1650 int max_port = min_port - 1 +
1651 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1652
1653 for (port = min_port; port <= max_port; port++) {
1654 if (!test_bit(port - 1, actv_ports.ports))
1655 continue;
1648 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1656 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1649 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; 1657 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1650 vp_oper->state = *vp_admin; 1658 vp_oper->state = *vp_admin;
@@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
1685{ 1693{
1686 int port; 1694 int port;
1687 struct mlx4_vport_oper_state *vp_oper; 1695 struct mlx4_vport_oper_state *vp_oper;
1696 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1697 &priv->dev, slave);
1698 int min_port = find_first_bit(actv_ports.ports,
1699 priv->dev.caps.num_ports) + 1;
1700 int max_port = min_port - 1 +
1701 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1702
1688 1703
1689 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 1704 for (port = min_port; port <= max_port; port++) {
1705 if (!test_bit(port - 1, actv_ports.ports))
1706 continue;
1690 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1707 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1691 if (NO_INDX != vp_oper->vlan_idx) { 1708 if (NO_INDX != vp_oper->vlan_idx) {
1692 __mlx4_unregister_vlan(&priv->dev, 1709 __mlx4_unregister_vlan(&priv->dev,
@@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2234 return vf+1; 2251 return vf+1;
2235} 2252}
2236 2253
2254int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2255{
2256 if (slave < 1 || slave > dev->num_vfs) {
2257 mlx4_err(dev,
2258 "Bad slave number:%d (number of activated slaves: %lu)\n",
2259 slave, dev->num_slaves);
2260 return -EINVAL;
2261 }
2262 return slave - 1;
2263}
2264
2265struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2266{
2267 struct mlx4_active_ports actv_ports;
2268 int vf;
2269
2270 bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2271
2272 if (slave == 0) {
2273 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2274 return actv_ports;
2275 }
2276
2277 vf = mlx4_get_vf_indx(dev, slave);
2278 if (vf < 0)
2279 return actv_ports;
2280
2281 bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2282 min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2283 dev->caps.num_ports));
2284
2285 return actv_ports;
2286}
2287EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2288
2289int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2290{
2291 unsigned n;
2292 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2293 unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2294
2295 if (port <= 0 || port > m)
2296 return -EINVAL;
2297
2298 n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2299 if (port <= n)
2300 port = n + 1;
2301
2302 return port;
2303}
2304EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2305
2306int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2307{
2308 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2309 if (test_bit(port - 1, actv_ports.ports))
2310 return port -
2311 find_first_bit(actv_ports.ports, dev->caps.num_ports);
2312
2313 return -1;
2314}
2315EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2316
2317struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2318 int port)
2319{
2320 unsigned i;
2321 struct mlx4_slaves_pport slaves_pport;
2322
2323 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2324
2325 if (port <= 0 || port > dev->caps.num_ports)
2326 return slaves_pport;
2327
2328 for (i = 0; i < dev->num_vfs + 1; i++) {
2329 struct mlx4_active_ports actv_ports =
2330 mlx4_get_active_ports(dev, i);
2331 if (test_bit(port - 1, actv_ports.ports))
2332 set_bit(i, slaves_pport.slaves);
2333 }
2334
2335 return slaves_pport;
2336}
2337EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2338
2339struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2340 struct mlx4_dev *dev,
2341 const struct mlx4_active_ports *crit_ports)
2342{
2343 unsigned i;
2344 struct mlx4_slaves_pport slaves_pport;
2345
2346 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2347
2348 for (i = 0; i < dev->num_vfs + 1; i++) {
2349 struct mlx4_active_ports actv_ports =
2350 mlx4_get_active_ports(dev, i);
2351 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2352 dev->caps.num_ports))
2353 set_bit(i, slaves_pport.slaves);
2354 }
2355
2356 return slaves_pport;
2357}
2358EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2359
2237int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) 2360int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2238{ 2361{
2239 struct mlx4_priv *priv = mlx4_priv(dev); 2362 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2289,6 +2412,30 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2289} 2412}
2290EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); 2413EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2291 2414
2415 /* mlx4_get_slave_default_vlan -
2416 * return true if VST ( default vlan)
2417 * if VST, will return vlan & qos (if not NULL)
2418 */
2419bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
2420 u16 *vlan, u8 *qos)
2421{
2422 struct mlx4_vport_oper_state *vp_oper;
2423 struct mlx4_priv *priv;
2424
2425 priv = mlx4_priv(dev);
2426 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2427
2428 if (MLX4_VGT != vp_oper->state.default_vlan) {
2429 if (vlan)
2430 *vlan = vp_oper->state.default_vlan;
2431 if (qos)
2432 *qos = vp_oper->state.default_qos;
2433 return true;
2434 }
2435 return false;
2436}
2437EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2438
2292int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) 2439int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2293{ 2440{
2294 struct mlx4_priv *priv = mlx4_priv(dev); 2441 struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index abaf6bb22416..57dda95b67d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -276,6 +276,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
276 .n_alarm = 0, 276 .n_alarm = 0,
277 .n_ext_ts = 0, 277 .n_ext_ts = 0,
278 .n_per_out = 0, 278 .n_per_out = 0,
279 .n_pins = 0,
279 .pps = 0, 280 .pps = 0,
280 .adjfreq = mlx4_en_phc_adjfreq, 281 .adjfreq = mlx4_en_phc_adjfreq,
281 .adjtime = mlx4_en_phc_adjtime, 282 .adjtime = mlx4_en_phc_adjtime,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b4881b686159..c95ca252187c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
62 int has_ets_tc = 0; 62 int has_ets_tc = 0;
63 63
64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
65 if (ets->prio_tc[i] > MLX4_EN_NUM_UP) { 65 if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n", 66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
67 i, ets->prio_tc[i]); 67 i, ets->prio_tc[i]);
68 return -EINVAL; 68 return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d357bf5a4686..0c59d4fe7e3a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
72MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." 72MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
73 " Per priority bit mask"); 73 " Per priority bit mask");
74 74
75MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
76 "Threshold for using inline data (range: 17-104, default: 104)");
77
78#define MAX_PFC_TX 0xff
79#define MAX_PFC_RX 0xff
80
75int en_print(const char *level, const struct mlx4_en_priv *priv, 81int en_print(const char *level, const struct mlx4_en_priv *priv,
76 const char *format, ...) 82 const char *format, ...)
77{ 83{
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
140 params->prof[i].tx_ring_num = params->num_tx_rings_p_up * 146 params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
141 MLX4_EN_NUM_UP; 147 MLX4_EN_NUM_UP;
142 params->prof[i].rss_rings = 0; 148 params->prof[i].rss_rings = 0;
149 params->prof[i].inline_thold = inline_thold;
143 } 150 }
144 151
145 return 0; 152 return 0;
@@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
274 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 281 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
275 mlx4_en_init_timestamp(mdev); 282 mlx4_en_init_timestamp(mdev);
276 283
277 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 284 /* Set default number of RX rings*/
278 if (!dev->caps.comp_pool) { 285 mlx4_en_set_num_rx_rings(mdev);
279 mdev->profile.prof[i].rx_ring_num =
280 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
281 min_t(int,
282 dev->caps.num_comp_vectors,
283 DEF_RX_RINGS)));
284 } else {
285 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
286 min_t(int, dev->caps.comp_pool/
287 dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
288 }
289 }
290 286
291 /* Create our own workqueue for reset/multicast tasks 287 /* Create our own workqueue for reset/multicast tasks
292 * Note: we cannot use the shared workqueue because of deadlocks caused 288 * Note: we cannot use the shared workqueue because of deadlocks caused
@@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
336 .protocol = MLX4_PROT_ETH, 332 .protocol = MLX4_PROT_ETH,
337}; 333};
338 334
335static void mlx4_en_verify_params(void)
336{
337 if (pfctx > MAX_PFC_TX) {
338 pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
339 pfctx, MAX_PFC_TX);
340 pfctx = 0;
341 }
342
343 if (pfcrx > MAX_PFC_RX) {
344 pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
345 pfcrx, MAX_PFC_RX);
346 pfcrx = 0;
347 }
348
349 if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
350 pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
351 inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
352 inline_thold = MAX_INLINE;
353 }
354}
355
339static int __init mlx4_en_init(void) 356static int __init mlx4_en_init(void)
340{ 357{
358 mlx4_en_verify_params();
359
341 return mlx4_register_interface(&mlx4_en_interface); 360 return mlx4_register_interface(&mlx4_en_interface);
342} 361}
343 362
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 84a96f70dfb5..f085c2df5e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -39,6 +39,7 @@
39#include <linux/hash.h> 39#include <linux/hash.h>
40#include <net/ip.h> 40#include <net/ip.h>
41#include <net/busy_poll.h> 41#include <net/busy_poll.h>
42#include <net/vxlan.h>
42 43
43#include <linux/mlx4/driver.h> 44#include <linux/mlx4/driver.h>
44#include <linux/mlx4/device.h> 45#include <linux/mlx4/device.h>
@@ -603,7 +604,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
603 int err = 0; 604 int err = 0;
604 u64 reg_id; 605 u64 reg_id;
605 int *qpn = &priv->base_qpn; 606 int *qpn = &priv->base_qpn;
606 u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 607 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
607 608
608 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 609 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
609 priv->dev->dev_addr); 610 priv->dev->dev_addr);
@@ -672,7 +673,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
672 u64 mac; 673 u64 mac;
673 674
674 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 675 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
675 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 676 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
676 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 677 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
677 priv->dev->dev_addr); 678 priv->dev->dev_addr);
678 mlx4_unregister_mac(dev, priv->port, mac); 679 mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +686,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
685 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 686 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
686 bucket = &priv->mac_hash[i]; 687 bucket = &priv->mac_hash[i];
687 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 688 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
688 mac = mlx4_en_mac_to_u64(entry->mac); 689 mac = mlx4_mac_to_u64(entry->mac);
689 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 690 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
690 entry->mac); 691 entry->mac);
691 mlx4_en_uc_steer_release(priv, entry->mac, 692 mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +716,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
715 struct mlx4_en_dev *mdev = priv->mdev; 716 struct mlx4_en_dev *mdev = priv->mdev;
716 struct mlx4_dev *dev = mdev->dev; 717 struct mlx4_dev *dev = mdev->dev;
717 int err = 0; 718 int err = 0;
718 u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac); 719 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
719 720
720 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 721 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
721 struct hlist_head *bucket; 722 struct hlist_head *bucket;
722 unsigned int mac_hash; 723 unsigned int mac_hash;
723 struct mlx4_mac_entry *entry; 724 struct mlx4_mac_entry *entry;
724 struct hlist_node *tmp; 725 struct hlist_node *tmp;
725 u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); 726 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
726 727
727 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 728 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
728 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 729 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -759,18 +760,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
759 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 760 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
760} 761}
761 762
762u64 mlx4_en_mac_to_u64(u8 *addr)
763{
764 u64 mac = 0;
765 int i;
766
767 for (i = 0; i < ETH_ALEN; i++) {
768 mac <<= 8;
769 mac |= addr[i];
770 }
771 return mac;
772}
773
774static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) 763static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
775{ 764{
776 int err = 0; 765 int err = 0;
@@ -1089,7 +1078,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1089 mlx4_en_cache_mclist(dev); 1078 mlx4_en_cache_mclist(dev);
1090 netif_addr_unlock_bh(dev); 1079 netif_addr_unlock_bh(dev);
1091 list_for_each_entry(mclist, &priv->mc_list, list) { 1080 list_for_each_entry(mclist, &priv->mc_list, list) {
1092 mcast_addr = mlx4_en_mac_to_u64(mclist->addr); 1081 mcast_addr = mlx4_mac_to_u64(mclist->addr);
1093 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 1082 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
1094 mcast_addr, 0, MLX4_MCAST_CONFIG); 1083 mcast_addr, 0, MLX4_MCAST_CONFIG);
1095 } 1084 }
@@ -1181,7 +1170,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1181 found = true; 1170 found = true;
1182 1171
1183 if (!found) { 1172 if (!found) {
1184 mac = mlx4_en_mac_to_u64(entry->mac); 1173 mac = mlx4_mac_to_u64(entry->mac);
1185 mlx4_en_uc_steer_release(priv, entry->mac, 1174 mlx4_en_uc_steer_release(priv, entry->mac,
1186 priv->base_qpn, 1175 priv->base_qpn,
1187 entry->reg_id); 1176 entry->reg_id);
@@ -1224,7 +1213,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1224 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1213 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1225 break; 1214 break;
1226 } 1215 }
1227 mac = mlx4_en_mac_to_u64(ha->addr); 1216 mac = mlx4_mac_to_u64(ha->addr);
1228 memcpy(entry->mac, ha->addr, ETH_ALEN); 1217 memcpy(entry->mac, ha->addr, ETH_ALEN);
1229 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1218 err = mlx4_register_mac(mdev->dev, priv->port, mac);
1230 if (err < 0) { 1219 if (err < 0) {
@@ -1677,7 +1666,7 @@ int mlx4_en_start_port(struct net_device *dev)
1677 } 1666 }
1678 1667
1679 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 1668 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1680 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC); 1669 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
1681 if (err) { 1670 if (err) {
1682 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 1671 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1683 err); 1672 err);
@@ -1709,6 +1698,10 @@ int mlx4_en_start_port(struct net_device *dev)
1709 1698
1710 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); 1699 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1711 1700
1701#ifdef CONFIG_MLX4_EN_VXLAN
1702 if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
1703 vxlan_get_rx_port(dev);
1704#endif
1712 priv->port_up = true; 1705 priv->port_up = true;
1713 netif_tx_start_all_queues(dev); 1706 netif_tx_start_all_queues(dev);
1714 netif_device_attach(dev); 1707 netif_device_attach(dev);
@@ -2216,7 +2209,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2216{ 2209{
2217 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2210 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2218 struct mlx4_en_dev *mdev = en_priv->mdev; 2211 struct mlx4_en_dev *mdev = en_priv->mdev;
2219 u64 mac_u64 = mlx4_en_mac_to_u64(mac); 2212 u64 mac_u64 = mlx4_mac_to_u64(mac);
2220 2213
2221 if (!is_valid_ether_addr(mac)) 2214 if (!is_valid_ether_addr(mac))
2222 return -EINVAL; 2215 return -EINVAL;
@@ -2276,6 +2269,83 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
2276 return 0; 2269 return 0;
2277} 2270}
2278 2271
2272#ifdef CONFIG_MLX4_EN_VXLAN
2273static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2274{
2275 int ret;
2276 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2277 vxlan_add_task);
2278
2279 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2280 if (ret)
2281 goto out;
2282
2283 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2284 VXLAN_STEER_BY_OUTER_MAC, 1);
2285out:
2286 if (ret)
2287 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2288}
2289
2290static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2291{
2292 int ret;
2293 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2294 vxlan_del_task);
2295
2296 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2297 VXLAN_STEER_BY_OUTER_MAC, 0);
2298 if (ret)
2299 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2300
2301 priv->vxlan_port = 0;
2302}
2303
2304static void mlx4_en_add_vxlan_port(struct net_device *dev,
2305 sa_family_t sa_family, __be16 port)
2306{
2307 struct mlx4_en_priv *priv = netdev_priv(dev);
2308 __be16 current_port;
2309
2310 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
2311 return;
2312
2313 if (sa_family == AF_INET6)
2314 return;
2315
2316 current_port = priv->vxlan_port;
2317 if (current_port && current_port != port) {
2318 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2319 ntohs(current_port), ntohs(port));
2320 return;
2321 }
2322
2323 priv->vxlan_port = port;
2324 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2325}
2326
2327static void mlx4_en_del_vxlan_port(struct net_device *dev,
2328 sa_family_t sa_family, __be16 port)
2329{
2330 struct mlx4_en_priv *priv = netdev_priv(dev);
2331 __be16 current_port;
2332
2333 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2334 return;
2335
2336 if (sa_family == AF_INET6)
2337 return;
2338
2339 current_port = priv->vxlan_port;
2340 if (current_port != port) {
2341 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2342 return;
2343 }
2344
2345 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2346}
2347#endif
2348
2279static const struct net_device_ops mlx4_netdev_ops = { 2349static const struct net_device_ops mlx4_netdev_ops = {
2280 .ndo_open = mlx4_en_open, 2350 .ndo_open = mlx4_en_open,
2281 .ndo_stop = mlx4_en_close, 2351 .ndo_stop = mlx4_en_close,
@@ -2302,6 +2372,10 @@ static const struct net_device_ops mlx4_netdev_ops = {
2302 .ndo_busy_poll = mlx4_en_low_latency_recv, 2372 .ndo_busy_poll = mlx4_en_low_latency_recv,
2303#endif 2373#endif
2304 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2374 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2375#ifdef CONFIG_MLX4_EN_VXLAN
2376 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2377 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2378#endif
2305}; 2379};
2306 2380
2307static const struct net_device_ops mlx4_netdev_ops_master = { 2381static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2351,7 +2425,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2351 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 2425 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2352 2426
2353 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 2427 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
2354 dev->dev_id = port - 1; 2428 dev->dev_port = port - 1;
2355 2429
2356 /* 2430 /*
2357 * Initialize driver private data 2431 * Initialize driver private data
@@ -2393,6 +2467,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2393 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2467 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2394 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2468 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2395 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2469 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2470#ifdef CONFIG_MLX4_EN_VXLAN
2471 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
2472 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
2473#endif
2396#ifdef CONFIG_MLX4_EN_DCB 2474#ifdef CONFIG_MLX4_EN_DCB
2397 if (!mlx4_is_slave(priv->mdev->dev)) { 2475 if (!mlx4_is_slave(priv->mdev->dev)) {
2398 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { 2476 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2417,7 +2495,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2417 if (mlx4_is_slave(priv->mdev->dev)) { 2495 if (mlx4_is_slave(priv->mdev->dev)) {
2418 eth_hw_addr_random(dev); 2496 eth_hw_addr_random(dev);
2419 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 2497 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
2420 mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr); 2498 mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
2421 mdev->dev->caps.def_mac[priv->port] = mac_u64; 2499 mdev->dev->caps.def_mac[priv->port] = mac_u64;
2422 } else { 2500 } else {
2423 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 2501 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
@@ -2526,7 +2604,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2526 } 2604 }
2527 2605
2528 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 2606 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2529 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC); 2607 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
2530 if (err) { 2608 if (err) {
2531 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 2609 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
2532 err); 2610 err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index dae1a1f4ae55..c2cfb05e7290 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
148 stats->tx_packets = 0; 148 stats->tx_packets = 0;
149 stats->tx_bytes = 0; 149 stats->tx_bytes = 0;
150 priv->port_stats.tx_chksum_offload = 0; 150 priv->port_stats.tx_chksum_offload = 0;
151 priv->port_stats.queue_stopped = 0;
152 priv->port_stats.wake_queue = 0;
153
151 for (i = 0; i < priv->tx_ring_num; i++) { 154 for (i = 0; i < priv->tx_ring_num; i++) {
152 stats->tx_packets += priv->tx_ring[i]->packets; 155 stats->tx_packets += priv->tx_ring[i]->packets;
153 stats->tx_bytes += priv->tx_ring[i]->bytes; 156 stats->tx_bytes += priv->tx_ring[i]->bytes;
154 priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; 157 priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
158 priv->port_stats.queue_stopped +=
159 priv->tx_ring[i]->queue_stopped;
160 priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
155 } 161 }
156 162
157 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + 163 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 890922c1c8ee..ba049ae88749 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
318 } 318 }
319} 319}
320 320
321void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
322{
323 int i;
324 int num_of_eqs;
325 int num_rx_rings;
326 struct mlx4_dev *dev = mdev->dev;
327
328 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
329 if (!dev->caps.comp_pool)
330 num_of_eqs = max_t(int, MIN_RX_RINGS,
331 min_t(int,
332 dev->caps.num_comp_vectors,
333 DEF_RX_RINGS));
334 else
335 num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
336 dev->caps.comp_pool/
337 dev->caps.num_ports) - 1;
338
339 num_rx_rings = min_t(int, num_of_eqs,
340 netif_get_num_default_rss_queues());
341 mdev->profile.prof[i].rx_ring_num =
342 rounddown_pow_of_two(num_rx_rings);
343 }
344}
345
321int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 346int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
322 struct mlx4_en_rx_ring **pring, 347 struct mlx4_en_rx_ring **pring,
323 u32 size, u16 stride, int node) 348 u32 size, u16 stride, int node)
@@ -636,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
636 if (!priv->port_up) 661 if (!priv->port_up)
637 return 0; 662 return 0;
638 663
664 if (budget <= 0)
665 return polled;
666
639 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx 667 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
640 * descriptor offset can be deduced from the CQE index instead of 668 * descriptor offset can be deduced from the CQE index instead of
641 * reading 'cqe->index' */ 669 * reading 'cqe->index' */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index c11d063473e5..03e5f6ac67e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
129 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) 129 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
130 return -ENOMEM; 130 return -ENOMEM;
131 131
132 /* The device currently only supports 10G speed */ 132 /* The device supports 1G, 10G and 40G speeds */
133 if (priv->port_state.link_speed != SPEED_10000) 133 if (priv->port_state.link_speed != 1000 &&
134 priv->port_state.link_speed != 10000 &&
135 priv->port_state.link_speed != 40000)
134 return priv->port_state.link_speed; 136 return priv->port_state.link_speed;
135 return 0; 137 return 0;
136} 138}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 13457032d15f..dd1f6d346459 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
44 44
45#include "mlx4_en.h" 45#include "mlx4_en.h"
46 46
47enum {
48 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
49 MAX_BF = 256,
50};
51
52static int inline_thold __read_mostly = MAX_INLINE;
53
54module_param_named(inline_thold, inline_thold, int, 0444);
55MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
56
57int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 47int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
58 struct mlx4_en_tx_ring **pring, int qpn, u32 size, 48 struct mlx4_en_tx_ring **pring, int qpn, u32 size,
59 u16 stride, int node, int queue_index) 49 u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
75 ring->size = size; 65 ring->size = size;
76 ring->size_mask = size - 1; 66 ring->size_mask = size - 1;
77 ring->stride = stride; 67 ring->stride = stride;
78 68 ring->inline_thold = priv->prof->inline_thold;
79 inline_thold = min(inline_thold, MAX_INLINE);
80 69
81 tmp = size * sizeof(struct mlx4_en_tx_info); 70 tmp = size * sizeof(struct mlx4_en_tx_info);
82 ring->tx_info = vmalloc_node(tmp, node); 71 ring->tx_info = vmalloc_node(tmp, node);
@@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
325 } 314 }
326 } 315 }
327 } 316 }
328 dev_kfree_skb(skb); 317 dev_kfree_skb_any(skb);
329 return tx_info->nr_txbb; 318 return tx_info->nr_txbb;
330} 319}
331 320
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
456 */ 445 */
457 if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { 446 if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
458 netif_tx_wake_queue(ring->tx_queue); 447 netif_tx_wake_queue(ring->tx_queue);
459 priv->port_stats.wake_queue++; 448 ring->wake_queue++;
460 } 449 }
461 return done; 450 return done;
462} 451}
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
520 return ring->buf + index * TXBB_SIZE; 509 return ring->buf + index * TXBB_SIZE;
521} 510}
522 511
523static int is_inline(struct sk_buff *skb, void **pfrag) 512static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
524{ 513{
525 void *ptr; 514 void *ptr;
526 515
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
580 } 569 }
581 } else { 570 } else {
582 *lso_header_size = 0; 571 *lso_header_size = 0;
583 if (!is_inline(skb, NULL)) 572 if (!is_inline(priv->prof->inline_thold, skb, NULL))
584 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; 573 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
585 else 574 else
586 real_size = inline_size(skb); 575 real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
596 int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; 585 int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
597 586
598 if (skb->len <= spc) { 587 if (skb->len <= spc) {
599 inl->byte_count = cpu_to_be32(1 << 31 | skb->len); 588 if (likely(skb->len >= MIN_PKT_LEN)) {
589 inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
590 } else {
591 inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
592 memset(((void *)(inl + 1)) + skb->len, 0,
593 MIN_PKT_LEN - skb->len);
594 }
600 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); 595 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
601 if (skb_shinfo(skb)->nr_frags) 596 if (skb_shinfo(skb)->nr_frags)
602 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, 597 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
696 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 691 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
697 /* every full Tx ring stops queue */ 692 /* every full Tx ring stops queue */
698 netif_tx_stop_queue(ring->tx_queue); 693 netif_tx_stop_queue(ring->tx_queue);
699 priv->port_stats.queue_stopped++; 694 ring->queue_stopped++;
700 695
701 /* If queue was emptied after the if, and before the 696 /* If queue was emptied after the if, and before the
702 * stop_queue - need to wake the queue, or else it will remain 697 * stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
709 if (unlikely(((int)(ring->prod - ring->cons)) <= 704 if (unlikely(((int)(ring->prod - ring->cons)) <=
710 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 705 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
711 netif_tx_wake_queue(ring->tx_queue); 706 netif_tx_wake_queue(ring->tx_queue);
712 priv->port_stats.wake_queue++; 707 ring->wake_queue++;
713 } else { 708 } else {
714 return NETDEV_TX_BUSY; 709 return NETDEV_TX_BUSY;
715 } 710 }
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
747 tx_info->data_offset = (void *)data - (void *)tx_desc; 742 tx_info->data_offset = (void *)data - (void *)tx_desc;
748 743
749 tx_info->linear = (lso_header_size < skb_headlen(skb) && 744 tx_info->linear = (lso_header_size < skb_headlen(skb) &&
750 !is_inline(skb, NULL)) ? 1 : 0; 745 !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
751 746
752 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; 747 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
753 748
754 if (is_inline(skb, &fragptr)) { 749 if (is_inline(ring->inline_thold, skb, &fragptr)) {
755 tx_info->inl = 1; 750 tx_info->inl = 1;
756 } else { 751 } else {
757 /* Map fragments */ 752 /* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
881 skb_tx_timestamp(skb); 876 skb_tx_timestamp(skb);
882 877
883 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { 878 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
884 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); 879 tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
880
885 op_own |= htonl((bf_index & 0xffff) << 8); 881 op_own |= htonl((bf_index & 0xffff) << 8);
886 /* Ensure new descirptor hits memory 882 /* Ensure new descirptor hits memory
887 * before setting ownership of this descriptor to HW */ 883 * before setting ownership of this descriptor to HW */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8992b38578d5..d501a2b0fb79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
271{ 271{
272 struct mlx4_priv *priv = mlx4_priv(dev); 272 struct mlx4_priv *priv = mlx4_priv(dev);
273 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 273 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
274 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { 274 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
275
276 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
277 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
275 pr_err("%s: Error: asking for slave:%d, port:%d\n", 278 pr_err("%s: Error: asking for slave:%d, port:%d\n",
276 __func__, slave, port); 279 __func__, slave, port);
277 return SLAVE_PORT_DOWN; 280 return SLAVE_PORT_DOWN;
@@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
285{ 288{
286 struct mlx4_priv *priv = mlx4_priv(dev); 289 struct mlx4_priv *priv = mlx4_priv(dev);
287 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 290 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
291 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
288 292
289 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 293 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
294 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
290 pr_err("%s: Error: asking for slave:%d, port:%d\n", 295 pr_err("%s: Error: asking for slave:%d, port:%d\n",
291 __func__, slave, port); 296 __func__, slave, port);
292 return -1; 297 return -1;
@@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
300{ 305{
301 int i; 306 int i;
302 enum slave_port_gen_event gen_event; 307 enum slave_port_gen_event gen_event;
308 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
309 port);
303 310
304 for (i = 0; i < dev->num_slaves; i++) 311 for (i = 0; i < dev->num_vfs + 1; i++)
305 set_and_calc_slave_port_state(dev, i, port, event, &gen_event); 312 if (test_bit(i, slaves_pport.slaves))
313 set_and_calc_slave_port_state(dev, i, port,
314 event, &gen_event);
306} 315}
307/************************************************************************** 316/**************************************************************************
308 The function get as input the new event to that port, 317 The function get as input the new event to that port,
@@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
321 struct mlx4_slave_state *ctx = NULL; 330 struct mlx4_slave_state *ctx = NULL;
322 unsigned long flags; 331 unsigned long flags;
323 int ret = -1; 332 int ret = -1;
333 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
324 enum slave_port_state cur_state = 334 enum slave_port_state cur_state =
325 mlx4_get_slave_port_state(dev, slave, port); 335 mlx4_get_slave_port_state(dev, slave, port);
326 336
327 *gen_event = SLAVE_PORT_GEN_EVENT_NONE; 337 *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
328 338
329 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 339 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
340 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
330 pr_err("%s: Error: asking for slave:%d, port:%d\n", 341 pr_err("%s: Error: asking for slave:%d, port:%d\n",
331 __func__, slave, port); 342 __func__, slave, port);
332 return ret; 343 return ret;
@@ -542,15 +553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
542 be64_to_cpu(eqe->event.cmd.out_param)); 553 be64_to_cpu(eqe->event.cmd.out_param));
543 break; 554 break;
544 555
545 case MLX4_EVENT_TYPE_PORT_CHANGE: 556 case MLX4_EVENT_TYPE_PORT_CHANGE: {
557 struct mlx4_slaves_pport slaves_port;
546 port = be32_to_cpu(eqe->event.port_change.port) >> 28; 558 port = be32_to_cpu(eqe->event.port_change.port) >> 28;
559 slaves_port = mlx4_phys_to_slaves_pport(dev, port);
547 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { 560 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
548 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, 561 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
549 port); 562 port);
550 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 563 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
551 if (!mlx4_is_master(dev)) 564 if (!mlx4_is_master(dev))
552 break; 565 break;
553 for (i = 0; i < dev->num_slaves; i++) { 566 for (i = 0; i < dev->num_vfs + 1; i++) {
567 if (!test_bit(i, slaves_port.slaves))
568 continue;
554 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 569 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
555 if (i == mlx4_master_func_num(dev)) 570 if (i == mlx4_master_func_num(dev))
556 continue; 571 continue;
@@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
558 " to slave: %d, port:%d\n", 573 " to slave: %d, port:%d\n",
559 __func__, i, port); 574 __func__, i, port);
560 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 575 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
561 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) 576 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
577 eqe->event.port_change.port =
578 cpu_to_be32(
579 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
580 | (mlx4_phys_to_slave_port(dev, i, port) << 28));
562 mlx4_slave_event(dev, i, eqe); 581 mlx4_slave_event(dev, i, eqe);
582 }
563 } else { /* IB port */ 583 } else { /* IB port */
564 set_and_calc_slave_port_state(dev, i, port, 584 set_and_calc_slave_port_state(dev, i, port,
565 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 585 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
580 if (!mlx4_is_master(dev)) 600 if (!mlx4_is_master(dev))
581 break; 601 break;
582 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 602 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
583 for (i = 0; i < dev->num_slaves; i++) { 603 for (i = 0; i < dev->num_vfs + 1; i++) {
604 if (!test_bit(i, slaves_port.slaves))
605 continue;
584 if (i == mlx4_master_func_num(dev)) 606 if (i == mlx4_master_func_num(dev))
585 continue; 607 continue;
586 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 608 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
587 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) 609 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
610 eqe->event.port_change.port =
611 cpu_to_be32(
612 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
613 | (mlx4_phys_to_slave_port(dev, i, port) << 28));
588 mlx4_slave_event(dev, i, eqe); 614 mlx4_slave_event(dev, i, eqe);
615 }
589 } 616 }
590 else /* IB port */ 617 else /* IB port */
591 /* port-up event will be sent to a slave when the 618 /* port-up event will be sent to a slave when the
@@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
594 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); 621 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
595 } 622 }
596 break; 623 break;
624 }
597 625
598 case MLX4_EVENT_TYPE_CQ_ERROR: 626 case MLX4_EVENT_TYPE_CQ_ERROR:
599 mlx4_warn(dev, "CQ %s on CQN %06x\n", 627 mlx4_warn(dev, "CQ %s on CQN %06x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7e2995ecea6f..d16a4d118903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -225,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
225#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 225#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
226 226
227 if (vhcr->op_modifier == 1) { 227 if (vhcr->op_modifier == 1) {
228 struct mlx4_active_ports actv_ports =
229 mlx4_get_active_ports(dev, slave);
230 int converted_port = mlx4_slave_convert_port(
231 dev, slave, vhcr->in_modifier);
232
233 if (converted_port < 0)
234 return -EINVAL;
235
236 vhcr->in_modifier = converted_port;
228 /* Set nic_info bit to mark new fields support */ 237 /* Set nic_info bit to mark new fields support */
229 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; 238 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
230 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); 239 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
231 240
232 field = vhcr->in_modifier; /* phys-port = logical-port */ 241 /* phys-port = logical-port */
242 field = vhcr->in_modifier -
243 find_first_bit(actv_ports.ports, dev->caps.num_ports);
233 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 244 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
234 245
246 field = vhcr->in_modifier;
235 /* size is now the QP number */ 247 /* size is now the QP number */
236 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; 248 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
237 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); 249 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
@@ -249,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
249 QUERY_FUNC_CAP_PHYS_PORT_ID); 261 QUERY_FUNC_CAP_PHYS_PORT_ID);
250 262
251 } else if (vhcr->op_modifier == 0) { 263 } else if (vhcr->op_modifier == 0) {
264 struct mlx4_active_ports actv_ports =
265 mlx4_get_active_ports(dev, slave);
252 /* enable rdma and ethernet interfaces, and new quota locations */ 266 /* enable rdma and ethernet interfaces, and new quota locations */
253 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 267 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
254 QUERY_FUNC_CAP_FLAG_QUOTAS); 268 QUERY_FUNC_CAP_FLAG_QUOTAS);
255 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 269 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
256 270
257 field = dev->caps.num_ports; 271 field = min(
272 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
273 dev->caps.num_ports);
258 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 274 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
259 275
260 size = dev->caps.function_caps; /* set PF behaviours */ 276 size = dev->caps.function_caps; /* set PF behaviours */
@@ -840,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
840 int err = 0; 856 int err = 0;
841 u8 field; 857 u8 field;
842 u32 bmme_flags; 858 u32 bmme_flags;
859 int real_port;
860 int slave_port;
861 int first_port;
862 struct mlx4_active_ports actv_ports;
843 863
844 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 864 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
845 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 865 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -852,8 +872,26 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
852 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 872 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
853 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; 873 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
854 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; 874 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
875 actv_ports = mlx4_get_active_ports(dev, slave);
876 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
877 for (slave_port = 0, real_port = first_port;
878 real_port < first_port +
879 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
880 ++real_port, ++slave_port) {
881 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
882 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
883 else
884 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
885 }
886 for (; slave_port < dev->caps.num_ports; ++slave_port)
887 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
855 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 888 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
856 889
890 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
891 field &= ~0x0F;
892 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
893 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
894
857 /* For guests, disable timestamp */ 895 /* For guests, disable timestamp */
858 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 896 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
859 field &= 0x7f; 897 field &= 0x7f;
@@ -903,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
903 u16 short_field; 941 u16 short_field;
904 int err; 942 int err;
905 int admin_link_state; 943 int admin_link_state;
944 int port = mlx4_slave_convert_port(dev, slave,
945 vhcr->in_modifier & 0xFF);
906 946
907#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 947#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
908#define MLX4_PORT_LINK_UP_MASK 0x80 948#define MLX4_PORT_LINK_UP_MASK 0x80
909#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 949#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
910#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 950#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
911 951
952 if (port < 0)
953 return -EINVAL;
954
955 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
956 (port & 0xFF);
957
912 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, 958 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
913 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 959 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
914 MLX4_CMD_NATIVE); 960 MLX4_CMD_NATIVE);
@@ -935,7 +981,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
935 MLX4_PUT(outbox->buf, port_type, 981 MLX4_PUT(outbox->buf, port_type,
936 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 982 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
937 983
938 short_field = 1; /* slave max gids */ 984 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
985 short_field = mlx4_get_slave_num_gids(dev, slave, port);
986 else
987 short_field = 1; /* slave max gids */
939 MLX4_PUT(outbox->buf, short_field, 988 MLX4_PUT(outbox->buf, short_field,
940 QUERY_PORT_CUR_MAX_GID_OFFSET); 989 QUERY_PORT_CUR_MAX_GID_OFFSET);
941 990
@@ -1585,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1585 struct mlx4_cmd_info *cmd) 1634 struct mlx4_cmd_info *cmd)
1586{ 1635{
1587 struct mlx4_priv *priv = mlx4_priv(dev); 1636 struct mlx4_priv *priv = mlx4_priv(dev);
1588 int port = vhcr->in_modifier; 1637 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1589 int err; 1638 int err;
1590 1639
1640 if (port < 0)
1641 return -EINVAL;
1642
1591 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 1643 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1592 return 0; 1644 return 0;
1593 1645
@@ -1677,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1677 struct mlx4_cmd_info *cmd) 1729 struct mlx4_cmd_info *cmd)
1678{ 1730{
1679 struct mlx4_priv *priv = mlx4_priv(dev); 1731 struct mlx4_priv *priv = mlx4_priv(dev);
1680 int port = vhcr->in_modifier; 1732 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1681 int err; 1733 int err;
1682 1734
1735 if (port < 0)
1736 return -EINVAL;
1737
1683 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 1738 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1684 (1 << port))) 1739 (1 << port)))
1685 return 0; 1740 return 0;
@@ -1724,6 +1779,46 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
1724 MLX4_CMD_NATIVE); 1779 MLX4_CMD_NATIVE);
1725} 1780}
1726 1781
1782struct mlx4_config_dev {
1783 __be32 update_flags;
1784 __be32 rsdv1[3];
1785 __be16 vxlan_udp_dport;
1786 __be16 rsvd2;
1787};
1788
1789#define MLX4_VXLAN_UDP_DPORT (1 << 0)
1790
1791static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
1792{
1793 int err;
1794 struct mlx4_cmd_mailbox *mailbox;
1795
1796 mailbox = mlx4_alloc_cmd_mailbox(dev);
1797 if (IS_ERR(mailbox))
1798 return PTR_ERR(mailbox);
1799
1800 memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
1801
1802 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
1803 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1804
1805 mlx4_free_cmd_mailbox(dev, mailbox);
1806 return err;
1807}
1808
1809int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
1810{
1811 struct mlx4_config_dev config_dev;
1812
1813 memset(&config_dev, 0, sizeof(config_dev));
1814 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
1815 config_dev.vxlan_udp_dport = udp_port;
1816
1817 return mlx4_CONFIG_DEV(dev, &config_dev);
1818}
1819EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
1820
1821
1727int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 1822int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
1728{ 1823{
1729 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, 1824 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
@@ -1891,7 +1986,8 @@ void mlx4_opreq_action(struct work_struct *work)
1891 err = EINVAL; 1986 err = EINVAL;
1892 break; 1987 break;
1893 } 1988 }
1894 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1989 err = mlx4_cmd(dev, 0, ((u32) err |
1990 (__force u32)cpu_to_be32(token) << 16),
1895 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 1991 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1896 MLX4_CMD_NATIVE); 1992 MLX4_CMD_NATIVE);
1897 if (err) { 1993 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d413e60071d4..f0ae95f66ceb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,7 +41,6 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h> 42#include <linux/io-mapping.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/netdevice.h>
45#include <linux/kmod.h> 44#include <linux/kmod.h>
46 45
47#include <linux/mlx4/device.h> 46#include <linux/mlx4/device.h>
@@ -78,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
78 77
79#endif /* CONFIG_PCI_MSI */ 78#endif /* CONFIG_PCI_MSI */
80 79
81static int num_vfs; 80static uint8_t num_vfs[3] = {0, 0, 0};
82module_param(num_vfs, int, 0444); 81static int num_vfs_argc = 3;
83MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); 82module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
83MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
84 "num_vfs=port1,port2,port1+2");
84 85
85static int probe_vf; 86static uint8_t probe_vf[3] = {0, 0, 0};
86module_param(probe_vf, int, 0644); 87static int probe_vfs_argc = 3;
87MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 88module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
89MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
90 "probe_vf=port1,port2,port1+2");
88 91
89int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 92int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
90module_param_named(log_num_mgm_entry_size, 93module_param_named(log_num_mgm_entry_size,
@@ -1470,7 +1473,11 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1470 int i; 1473 int i;
1471 1474
1472 for (i = 1; i <= dev->caps.num_ports; i++) { 1475 for (i = 1; i <= dev->caps.num_ports; i++) {
1473 dev->caps.gid_table_len[i] = 1; 1476 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1477 dev->caps.gid_table_len[i] =
1478 mlx4_get_slave_num_gids(dev, 0, i);
1479 else
1480 dev->caps.gid_table_len[i] = 1;
1474 dev->caps.pkey_table_len[i] = 1481 dev->caps.pkey_table_len[i] =
1475 dev->phys_caps.pkey_phys_table_len[i] - 1; 1482 dev->phys_caps.pkey_phys_table_len[i] - 1;
1476 } 1483 }
@@ -1495,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1495 if (mlx4_log_num_mgm_entry_size == -1 && 1502 if (mlx4_log_num_mgm_entry_size == -1 &&
1496 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1503 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1497 (!mlx4_is_mfunc(dev) || 1504 (!mlx4_is_mfunc(dev) ||
1498 (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && 1505 (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
1499 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1506 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1500 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1507 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1501 dev->oper_log_mgm_entry_size = 1508 dev->oper_log_mgm_entry_size =
@@ -1981,9 +1988,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1981 struct mlx4_priv *priv = mlx4_priv(dev); 1988 struct mlx4_priv *priv = mlx4_priv(dev);
1982 struct msix_entry *entries; 1989 struct msix_entry *entries;
1983 int nreq = min_t(int, dev->caps.num_ports * 1990 int nreq = min_t(int, dev->caps.num_ports *
1984 min_t(int, netif_get_num_default_rss_queues() + 1, 1991 min_t(int, num_online_cpus() + 1,
1985 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); 1992 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
1986 int err;
1987 int i; 1993 int i;
1988 1994
1989 if (msi_x) { 1995 if (msi_x) {
@@ -1997,23 +2003,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1997 for (i = 0; i < nreq; ++i) 2003 for (i = 0; i < nreq; ++i)
1998 entries[i].entry = i; 2004 entries[i].entry = i;
1999 2005
2000 retry: 2006 nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
2001 err = pci_enable_msix(dev->pdev, entries, nreq); 2007
2002 if (err) { 2008 if (nreq < 0) {
2003 /* Try again if at least 2 vectors are available */
2004 if (err > 1) {
2005 mlx4_info(dev, "Requested %d vectors, "
2006 "but only %d MSI-X vectors available, "
2007 "trying again\n", nreq, err);
2008 nreq = err;
2009 goto retry;
2010 }
2011 kfree(entries); 2009 kfree(entries);
2012 goto no_msi; 2010 goto no_msi;
2013 } 2011 } else if (nreq < MSIX_LEGACY_SZ +
2014 2012 dev->caps.num_ports * MIN_MSIX_P_PORT) {
2015 if (nreq <
2016 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
2017 /*Working in legacy mode , all EQ's shared*/ 2013 /*Working in legacy mode , all EQ's shared*/
2018 dev->caps.comp_pool = 0; 2014 dev->caps.comp_pool = 0;
2019 dev->caps.num_comp_vectors = nreq - 1; 2015 dev->caps.num_comp_vectors = nreq - 1;
@@ -2201,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2201 struct mlx4_dev *dev; 2197 struct mlx4_dev *dev;
2202 int err; 2198 int err;
2203 int port; 2199 int port;
2200 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
2201 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
2202 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
2203 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
2204 unsigned total_vfs = 0;
2205 int sriov_initialized = 0;
2206 unsigned int i;
2204 2207
2205 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 2208 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
2206 2209
@@ -2215,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2215 * per port, we must limit the number of VFs to 63 (since their are 2218 * per port, we must limit the number of VFs to 63 (since their are
2216 * 128 MACs) 2219 * 128 MACs)
2217 */ 2220 */
2218 if (num_vfs >= MLX4_MAX_NUM_VF) { 2221 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
2222 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
2223 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
2224 if (nvfs[i] < 0) {
2225 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
2226 return -EINVAL;
2227 }
2228 }
2229 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
2230 i++) {
2231 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
2232 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
2233 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
2234 return -EINVAL;
2235 }
2236 }
2237 if (total_vfs >= MLX4_MAX_NUM_VF) {
2219 dev_err(&pdev->dev, 2238 dev_err(&pdev->dev,
2220 "Requested more VF's (%d) than allowed (%d)\n", 2239 "Requested more VF's (%d) than allowed (%d)\n",
2221 num_vfs, MLX4_MAX_NUM_VF - 1); 2240 total_vfs, MLX4_MAX_NUM_VF - 1);
2222 return -EINVAL; 2241 return -EINVAL;
2223 } 2242 }
2224 2243
2225 if (num_vfs < 0) { 2244 for (i = 0; i < MLX4_MAX_PORTS; i++) {
2226 pr_err("num_vfs module parameter cannot be negative\n"); 2245 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
2227 return -EINVAL; 2246 dev_err(&pdev->dev,
2247 "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
2248 nvfs[i] + nvfs[2], i + 1,
2249 MLX4_MAX_NUM_VF_P_PORT - 1);
2250 return -EINVAL;
2251 }
2228 } 2252 }
2253
2254
2229 /* 2255 /*
2230 * Check for BARs. 2256 * Check for BARs.
2231 */ 2257 */
@@ -2300,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2300 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2326 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2301 /* When acting as pf, we normally skip vfs unless explicitly 2327 /* When acting as pf, we normally skip vfs unless explicitly
2302 * requested to probe them. */ 2328 * requested to probe them. */
2303 if (num_vfs && extended_func_num(pdev) > probe_vf) { 2329 if (total_vfs) {
2304 mlx4_warn(dev, "Skipping virtual function:%d\n", 2330 unsigned vfs_offset = 0;
2305 extended_func_num(pdev)); 2331 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2306 err = -ENODEV; 2332 vfs_offset + nvfs[i] < extended_func_num(pdev);
2307 goto err_free_dev; 2333 vfs_offset += nvfs[i], i++)
2334 ;
2335 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
2336 err = -ENODEV;
2337 goto err_free_dev;
2338 }
2339 if ((extended_func_num(pdev) - vfs_offset)
2340 > prb_vf[i]) {
2341 mlx4_warn(dev, "Skipping virtual function:%d\n",
2342 extended_func_num(pdev));
2343 err = -ENODEV;
2344 goto err_free_dev;
2345 }
2308 } 2346 }
2309 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2347 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2310 dev->flags |= MLX4_FLAG_SLAVE; 2348 dev->flags |= MLX4_FLAG_SLAVE;
@@ -2324,22 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2324 } 2362 }
2325 } 2363 }
2326 2364
2327 if (num_vfs) { 2365 if (total_vfs) {
2328 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); 2366 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2329 2367 total_vfs);
2330 atomic_inc(&pf_loading); 2368 dev->dev_vfs = kzalloc(
2331 err = pci_enable_sriov(pdev, num_vfs); 2369 total_vfs * sizeof(*dev->dev_vfs),
2332 atomic_dec(&pf_loading); 2370 GFP_KERNEL);
2333 2371 if (NULL == dev->dev_vfs) {
2334 if (err) { 2372 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2335 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2336 err);
2337 err = 0; 2373 err = 0;
2338 } else { 2374 } else {
2339 mlx4_warn(dev, "Running in master mode\n"); 2375 atomic_inc(&pf_loading);
2340 dev->flags |= MLX4_FLAG_SRIOV | 2376 err = pci_enable_sriov(pdev, total_vfs);
2341 MLX4_FLAG_MASTER; 2377 atomic_dec(&pf_loading);
2342 dev->num_vfs = num_vfs; 2378 if (err) {
2379 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2380 err);
2381 err = 0;
2382 } else {
2383 mlx4_warn(dev, "Running in master mode\n");
2384 dev->flags |= MLX4_FLAG_SRIOV |
2385 MLX4_FLAG_MASTER;
2386 dev->num_vfs = total_vfs;
2387 sriov_initialized = 1;
2388 }
2343 } 2389 }
2344 } 2390 }
2345 2391
@@ -2404,12 +2450,37 @@ slave_start:
2404 /* In master functions, the communication channel must be initialized 2450 /* In master functions, the communication channel must be initialized
2405 * after obtaining its address from fw */ 2451 * after obtaining its address from fw */
2406 if (mlx4_is_master(dev)) { 2452 if (mlx4_is_master(dev)) {
2453 unsigned sum = 0;
2407 err = mlx4_multi_func_init(dev); 2454 err = mlx4_multi_func_init(dev);
2408 if (err) { 2455 if (err) {
2409 mlx4_err(dev, "Failed to init master mfunc" 2456 mlx4_err(dev, "Failed to init master mfunc"
2410 "interface, aborting.\n"); 2457 "interface, aborting.\n");
2411 goto err_close; 2458 goto err_close;
2412 } 2459 }
2460 if (sriov_initialized) {
2461 int ib_ports = 0;
2462 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2463 ib_ports++;
2464
2465 if (ib_ports &&
2466 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2467 mlx4_err(dev,
2468 "Invalid syntax of num_vfs/probe_vfs "
2469 "with IB port. Single port VFs syntax"
2470 " is only supported when all ports "
2471 "are configured as ethernet\n");
2472 goto err_close;
2473 }
2474 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
2475 unsigned j;
2476 for (j = 0; j < nvfs[i]; ++sum, ++j) {
2477 dev->dev_vfs[sum].min_port =
2478 i < 2 ? i + 1 : 1;
2479 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
2480 dev->caps.num_ports;
2481 }
2482 }
2483 }
2413 } 2484 }
2414 2485
2415 err = mlx4_alloc_eq_table(dev); 2486 err = mlx4_alloc_eq_table(dev);
@@ -2517,6 +2588,8 @@ err_rel_own:
2517 if (!mlx4_is_slave(dev)) 2588 if (!mlx4_is_slave(dev))
2518 mlx4_free_ownership(dev); 2589 mlx4_free_ownership(dev);
2519 2590
2591 kfree(priv->dev.dev_vfs);
2592
2520err_free_dev: 2593err_free_dev:
2521 kfree(priv); 2594 kfree(priv);
2522 2595
@@ -2603,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2603 kfree(dev->caps.qp0_proxy); 2676 kfree(dev->caps.qp0_proxy);
2604 kfree(dev->caps.qp1_tunnel); 2677 kfree(dev->caps.qp1_tunnel);
2605 kfree(dev->caps.qp1_proxy); 2678 kfree(dev->caps.qp1_proxy);
2679 kfree(dev->dev_vfs);
2606 2680
2607 kfree(priv); 2681 kfree(priv);
2608 pci_release_regions(pdev); 2682 pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index db7dc0b6667d..80ccb4edf825 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1387 struct mlx4_cmd_info *cmd) 1387 struct mlx4_cmd_info *cmd)
1388{ 1388{
1389 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1389 u32 qpn = (u32) vhcr->in_param & 0xffffffff;
1390 u8 port = vhcr->in_param >> 62; 1390 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
1391 enum mlx4_steer_type steer = vhcr->in_modifier; 1391 enum mlx4_steer_type steer = vhcr->in_modifier;
1392 1392
1393 if (port < 0)
1394 return -EINVAL;
1395
1393 /* Promiscuous unicast is not allowed in mfunc */ 1396 /* Promiscuous unicast is not allowed in mfunc */
1394 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1397 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
1395 return 0; 1398 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7aec6c833973..cf8be41abb36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -788,6 +788,10 @@ enum {
788 MLX4_USE_RR = 1, 788 MLX4_USE_RR = 1,
789}; 789};
790 790
791struct mlx4_roce_gid_entry {
792 u8 raw[16];
793};
794
791struct mlx4_priv { 795struct mlx4_priv {
792 struct mlx4_dev dev; 796 struct mlx4_dev dev;
793 797
@@ -834,6 +838,7 @@ struct mlx4_priv {
834 int fs_hash_mode; 838 int fs_hash_mode;
835 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 839 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
836 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 840 __be64 slave_node_guids[MLX4_MFUNC_MAX];
841 struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
837 842
838 atomic_t opreq_count; 843 atomic_t opreq_count;
839 struct work_struct opreq_task; 844 struct work_struct opreq_task;
@@ -1242,11 +1247,6 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
1242 struct mlx4_cmd_mailbox *inbox, 1247 struct mlx4_cmd_mailbox *inbox,
1243 struct mlx4_cmd_mailbox *outbox, 1248 struct mlx4_cmd_mailbox *outbox,
1244 struct mlx4_cmd_info *cmd); 1249 struct mlx4_cmd_info *cmd);
1245int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
1246 struct mlx4_vhcr *vhcr,
1247 struct mlx4_cmd_mailbox *inbox,
1248 struct mlx4_cmd_mailbox *outbox,
1249 struct mlx4_cmd_info *cmd);
1250 1250
1251int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); 1251int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
1252int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); 1252int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@@ -1282,4 +1282,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
1282 1282
1283void mlx4_init_quotas(struct mlx4_dev *dev); 1283void mlx4_init_quotas(struct mlx4_dev *dev);
1284 1284
1285int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
1286/* Returns the VF index of slave */
1287int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
1288
1285#endif /* MLX4_H */ 1289#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b57e8c87a34e..7a733c287744 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -187,6 +187,13 @@ enum {
187#define GET_AVG_PERF_COUNTER(cnt) (0) 187#define GET_AVG_PERF_COUNTER(cnt) (0)
188#endif /* MLX4_EN_PERF_STAT */ 188#endif /* MLX4_EN_PERF_STAT */
189 189
190/* Constants for TX flow */
191enum {
192 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
193 MAX_BF = 256,
194 MIN_PKT_LEN = 17,
195};
196
190/* 197/*
191 * Configurables 198 * Configurables
192 */ 199 */
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
267 unsigned long bytes; 274 unsigned long bytes;
268 unsigned long packets; 275 unsigned long packets;
269 unsigned long tx_csum; 276 unsigned long tx_csum;
277 unsigned long queue_stopped;
278 unsigned long wake_queue;
270 struct mlx4_bf bf; 279 struct mlx4_bf bf;
271 bool bf_enabled; 280 bool bf_enabled;
272 struct netdev_queue *tx_queue; 281 struct netdev_queue *tx_queue;
273 int hwtstamp_tx_type; 282 int hwtstamp_tx_type;
283 int inline_thold;
274}; 284};
275 285
276struct mlx4_en_rx_desc { 286struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
346 u8 tx_pause; 356 u8 tx_pause;
347 u8 tx_ppp; 357 u8 tx_ppp;
348 int rss_rings; 358 int rss_rings;
359 int inline_thold;
349}; 360};
350 361
351struct mlx4_en_profile { 362struct mlx4_en_profile {
@@ -548,6 +559,10 @@ struct mlx4_en_priv {
548 struct work_struct linkstate_task; 559 struct work_struct linkstate_task;
549 struct delayed_work stats_task; 560 struct delayed_work stats_task;
550 struct delayed_work service_task; 561 struct delayed_work service_task;
562#ifdef CONFIG_MLX4_EN_VXLAN
563 struct work_struct vxlan_add_task;
564 struct work_struct vxlan_del_task;
565#endif
551 struct mlx4_en_perf_stats pstats; 566 struct mlx4_en_perf_stats pstats;
552 struct mlx4_en_pkt_stats pkstats; 567 struct mlx4_en_pkt_stats pkstats;
553 struct mlx4_en_port_stats port_stats; 568 struct mlx4_en_port_stats port_stats;
@@ -574,6 +589,7 @@ struct mlx4_en_priv {
574 struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT]; 589 struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
575#endif 590#endif
576 u64 tunnel_reg_id; 591 u64 tunnel_reg_id;
592 __be16 vxlan_port;
577}; 593};
578 594
579enum mlx4_en_wol { 595enum mlx4_en_wol {
@@ -737,7 +753,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
737 int cq, int user_prio); 753 int cq, int user_prio);
738void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 754void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
739 struct mlx4_en_tx_ring *ring); 755 struct mlx4_en_tx_ring *ring);
740 756void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
741int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 757int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
742 struct mlx4_en_rx_ring **pring, 758 struct mlx4_en_rx_ring **pring,
743 u32 size, u16 stride, int node); 759 u32 size, u16 stride, int node);
@@ -786,7 +802,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
786 802
787#define MLX4_EN_NUM_SELF_TEST 5 803#define MLX4_EN_NUM_SELF_TEST 5
788void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 804void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
789u64 mlx4_en_mac_to_u64(u8 *addr);
790void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); 805void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
791 806
792/* 807/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a58bcbf1b806..cfcad26ed40f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -505,6 +505,84 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
505 mlx4_free_cmd_mailbox(dev, outmailbox); 505 mlx4_free_cmd_mailbox(dev, outmailbox);
506 return err; 506 return err;
507} 507}
508static struct mlx4_roce_gid_entry zgid_entry;
509
510int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
511{
512 int vfs;
513 int slave_gid = slave;
514 unsigned i;
515 struct mlx4_slaves_pport slaves_pport;
516 struct mlx4_active_ports actv_ports;
517 unsigned max_port_p_one;
518
519 if (slave == 0)
520 return MLX4_ROCE_PF_GIDS;
521
522 /* Slave is a VF */
523 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
524 actv_ports = mlx4_get_active_ports(dev, slave);
525 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
526 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
527
528 for (i = 1; i < max_port_p_one; i++) {
529 struct mlx4_active_ports exclusive_ports;
530 struct mlx4_slaves_pport slaves_pport_actv;
531 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
532 set_bit(i - 1, exclusive_ports.ports);
533 if (i == port)
534 continue;
535 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
536 dev, &exclusive_ports);
537 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
538 dev->num_vfs + 1);
539 }
540 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
541 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
542 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
543 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
544}
545
546int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
547{
548 int gids;
549 unsigned i;
550 int slave_gid = slave;
551 int vfs;
552
553 struct mlx4_slaves_pport slaves_pport;
554 struct mlx4_active_ports actv_ports;
555 unsigned max_port_p_one;
556
557 if (slave == 0)
558 return 0;
559
560 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
561 actv_ports = mlx4_get_active_ports(dev, slave);
562 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
563 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
564
565 for (i = 1; i < max_port_p_one; i++) {
566 struct mlx4_active_ports exclusive_ports;
567 struct mlx4_slaves_pport slaves_pport_actv;
568 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
569 set_bit(i - 1, exclusive_ports.ports);
570 if (i == port)
571 continue;
572 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
573 dev, &exclusive_ports);
574 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
575 dev->num_vfs + 1);
576 }
577 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
578 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
579 if (slave_gid <= gids % vfs)
580 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
581
582 return MLX4_ROCE_PF_GIDS + (gids % vfs) +
583 ((gids / vfs) * (slave_gid - 1));
584}
585EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
508 586
509static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 587static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
510 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 588 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
@@ -515,14 +593,18 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
515 struct mlx4_slave_state *slave_st = &master->slave_state[slave]; 593 struct mlx4_slave_state *slave_st = &master->slave_state[slave];
516 struct mlx4_set_port_rqp_calc_context *qpn_context; 594 struct mlx4_set_port_rqp_calc_context *qpn_context;
517 struct mlx4_set_port_general_context *gen_context; 595 struct mlx4_set_port_general_context *gen_context;
596 struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
518 int reset_qkey_viols; 597 int reset_qkey_viols;
519 int port; 598 int port;
520 int is_eth; 599 int is_eth;
600 int num_gids;
601 int base;
521 u32 in_modifier; 602 u32 in_modifier;
522 u32 promisc; 603 u32 promisc;
523 u16 mtu, prev_mtu; 604 u16 mtu, prev_mtu;
524 int err; 605 int err;
525 int i; 606 int i, j;
607 int offset;
526 __be32 agg_cap_mask; 608 __be32 agg_cap_mask;
527 __be32 slave_cap_mask; 609 __be32 slave_cap_mask;
528 __be32 new_cap_mask; 610 __be32 new_cap_mask;
@@ -535,7 +617,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
535 /* Slaves cannot perform SET_PORT operations except changing MTU */ 617 /* Slaves cannot perform SET_PORT operations except changing MTU */
536 if (is_eth) { 618 if (is_eth) {
537 if (slave != dev->caps.function && 619 if (slave != dev->caps.function &&
538 in_modifier != MLX4_SET_PORT_GENERAL) { 620 in_modifier != MLX4_SET_PORT_GENERAL &&
621 in_modifier != MLX4_SET_PORT_GID_TABLE) {
539 mlx4_warn(dev, "denying SET_PORT for slave:%d\n", 622 mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
540 slave); 623 slave);
541 return -EINVAL; 624 return -EINVAL;
@@ -581,6 +664,67 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
581 664
582 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 665 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
583 break; 666 break;
667 case MLX4_SET_PORT_GID_TABLE:
668 /* change to MULTIPLE entries: number of guest's gids
669 * need a FOR-loop here over number of gids the guest has.
670 * 1. Check no duplicates in gids passed by slave
671 */
672 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
673 base = mlx4_get_base_gid_ix(dev, slave, port);
674 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
675 for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
676 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
677 sizeof(zgid_entry)))
678 continue;
679 gid_entry_mb1 = gid_entry_mbox + 1;
680 for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
681 if (!memcmp(gid_entry_mb1->raw,
682 zgid_entry.raw, sizeof(zgid_entry)))
683 continue;
684 if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
685 sizeof(gid_entry_mbox->raw))) {
686 /* found duplicate */
687 return -EINVAL;
688 }
689 }
690 }
691
692 /* 2. Check that do not have duplicates in OTHER
693 * entries in the port GID table
694 */
695 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
696 if (i >= base && i < base + num_gids)
697 continue; /* don't compare to slave's current gids */
698 gid_entry_tbl = &priv->roce_gids[port - 1][i];
699 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
700 continue;
701 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
702 for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
703 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
704 sizeof(zgid_entry)))
705 continue;
706 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
707 sizeof(gid_entry_tbl->raw))) {
708 /* found duplicate */
709 mlx4_warn(dev, "requested gid entry for slave:%d "
710 "is a duplicate of gid at index %d\n",
711 slave, i);
712 return -EINVAL;
713 }
714 }
715 }
716
717 /* insert slave GIDs with memcpy, starting at slave's base index */
718 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
719 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
720 memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
721
722 /* Now, copy roce port gids table to current mailbox for passing to FW */
723 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
724 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
725 memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
726
727 break;
584 } 728 }
585 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, 729 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
586 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 730 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -646,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
646 struct mlx4_cmd_mailbox *outbox, 790 struct mlx4_cmd_mailbox *outbox,
647 struct mlx4_cmd_info *cmd) 791 struct mlx4_cmd_info *cmd)
648{ 792{
793 int port = mlx4_slave_convert_port(
794 dev, slave, vhcr->in_modifier & 0xFF);
795
796 if (port < 0)
797 return -EINVAL;
798
799 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
800 (port & 0xFF);
801
649 return mlx4_common_set_port(dev, slave, vhcr->in_modifier, 802 return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
650 vhcr->op_modifier, inbox); 803 vhcr->op_modifier, inbox);
651} 804}
@@ -835,7 +988,7 @@ struct mlx4_set_port_vxlan_context {
835 u8 steering; 988 u8 steering;
836}; 989};
837 990
838int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering) 991int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
839{ 992{
840 int err; 993 int err;
841 u32 in_mod; 994 u32 in_mod;
@@ -849,7 +1002,8 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
849 memset(context, 0, sizeof(*context)); 1002 memset(context, 0, sizeof(*context));
850 1003
851 context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY; 1004 context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
852 context->enable_flags = VXLAN_ENABLE; 1005 if (enable)
1006 context->enable_flags = VXLAN_ENABLE;
853 context->steering = steering; 1007 context->steering = steering;
854 1008
855 in_mod = MLX4_SET_PORT_VXLAN << 8 | port; 1009 in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
@@ -927,3 +1081,108 @@ void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
927 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; 1081 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
928} 1082}
929EXPORT_SYMBOL(mlx4_set_stats_bitmap); 1083EXPORT_SYMBOL(mlx4_set_stats_bitmap);
1084
1085int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1086 int *slave_id)
1087{
1088 struct mlx4_priv *priv = mlx4_priv(dev);
1089 int i, found_ix = -1;
1090 int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1091 struct mlx4_slaves_pport slaves_pport;
1092 unsigned num_vfs;
1093 int slave_gid;
1094
1095 if (!mlx4_is_mfunc(dev))
1096 return -EINVAL;
1097
1098 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1099 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
1100
1101 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1102 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
1103 found_ix = i;
1104 break;
1105 }
1106 }
1107
1108 if (found_ix >= 0) {
1109 if (found_ix < MLX4_ROCE_PF_GIDS)
1110 slave_gid = 0;
1111 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1112 (vf_gids / num_vfs + 1))
1113 slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1114 (vf_gids / num_vfs + 1)) + 1;
1115 else
1116 slave_gid =
1117 ((found_ix - MLX4_ROCE_PF_GIDS -
1118 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1119 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1120
1121 if (slave_gid) {
1122 struct mlx4_active_ports exclusive_ports;
1123 struct mlx4_active_ports actv_ports;
1124 struct mlx4_slaves_pport slaves_pport_actv;
1125 unsigned max_port_p_one;
1126 int num_slaves_before = 1;
1127
1128 for (i = 1; i < port; i++) {
1129 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1130 set_bit(i, exclusive_ports.ports);
1131 slaves_pport_actv =
1132 mlx4_phys_to_slaves_pport_actv(
1133 dev, &exclusive_ports);
1134 num_slaves_before += bitmap_weight(
1135 slaves_pport_actv.slaves,
1136 dev->num_vfs + 1);
1137 }
1138
1139 if (slave_gid < num_slaves_before) {
1140 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1141 set_bit(port - 1, exclusive_ports.ports);
1142 slaves_pport_actv =
1143 mlx4_phys_to_slaves_pport_actv(
1144 dev, &exclusive_ports);
1145 slave_gid += bitmap_weight(
1146 slaves_pport_actv.slaves,
1147 dev->num_vfs + 1) -
1148 num_slaves_before;
1149 }
1150 actv_ports = mlx4_get_active_ports(dev, slave_gid);
1151 max_port_p_one = find_first_bit(
1152 actv_ports.ports, dev->caps.num_ports) +
1153 bitmap_weight(actv_ports.ports,
1154 dev->caps.num_ports) + 1;
1155
1156 for (i = 1; i < max_port_p_one; i++) {
1157 if (i == port)
1158 continue;
1159 bitmap_zero(exclusive_ports.ports,
1160 dev->caps.num_ports);
1161 set_bit(i - 1, exclusive_ports.ports);
1162 slaves_pport_actv =
1163 mlx4_phys_to_slaves_pport_actv(
1164 dev, &exclusive_ports);
1165 slave_gid += bitmap_weight(
1166 slaves_pport_actv.slaves,
1167 dev->num_vfs + 1);
1168 }
1169 }
1170 *slave_id = slave_gid;
1171 }
1172
1173 return (found_ix >= 0) ? 0 : -EINVAL;
1174}
1175EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
1176
1177int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1178 u8 *gid)
1179{
1180 struct mlx4_priv *priv = mlx4_priv(dev);
1181
1182 if (!mlx4_is_master(dev))
1183 return -EINVAL;
1184
1185 memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
1186 return 0;
1187}
1188EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 57428a0cb9dd..3b5f53ef29b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -52,6 +52,8 @@
52struct mac_res { 52struct mac_res {
53 struct list_head list; 53 struct list_head list;
54 u64 mac; 54 u64 mac;
55 int ref_count;
56 u8 smac_index;
55 u8 port; 57 u8 port;
56}; 58};
57 59
@@ -219,6 +221,11 @@ struct res_fs_rule {
219 int qpn; 221 int qpn;
220}; 222};
221 223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226 return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
222static void *res_tracker_lookup(struct rb_root *root, u64 res_id) 229static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
223{ 230{
224 struct rb_node *node = root->rb_node; 231 struct rb_node *node = root->rb_node;
@@ -461,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
461 468
462 spin_lock_init(&res_alloc->alloc_lock); 469 spin_lock_init(&res_alloc->alloc_lock);
463 for (t = 0; t < dev->num_vfs + 1; t++) { 470 for (t = 0; t < dev->num_vfs + 1; t++) {
471 struct mlx4_active_ports actv_ports =
472 mlx4_get_active_ports(dev, t);
464 switch (i) { 473 switch (i) {
465 case RES_QP: 474 case RES_QP:
466 initialize_res_quotas(dev, res_alloc, RES_QP, 475 initialize_res_quotas(dev, res_alloc, RES_QP,
@@ -490,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
490 break; 499 break;
491 case RES_MAC: 500 case RES_MAC:
492 if (t == mlx4_master_func_num(dev)) { 501 if (t == mlx4_master_func_num(dev)) {
493 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 502 int max_vfs_pport = 0;
503 /* Calculate the max vfs per port for */
504 /* both ports. */
505 for (j = 0; j < dev->caps.num_ports;
506 j++) {
507 struct mlx4_slaves_pport slaves_pport =
508 mlx4_phys_to_slaves_pport(dev, j + 1);
509 unsigned current_slaves =
510 bitmap_weight(slaves_pport.slaves,
511 dev->caps.num_ports) - 1;
512 if (max_vfs_pport < current_slaves)
513 max_vfs_pport =
514 current_slaves;
515 }
516 res_alloc->quota[t] =
517 MLX4_MAX_MAC_NUM -
518 2 * max_vfs_pport;
494 res_alloc->guaranteed[t] = 2; 519 res_alloc->guaranteed[t] = 2;
495 for (j = 0; j < MLX4_MAX_PORTS; j++) 520 for (j = 0; j < MLX4_MAX_PORTS; j++)
496 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; 521 res_alloc->res_port_free[j] =
522 MLX4_MAX_MAC_NUM;
497 } else { 523 } else {
498 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 524 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
499 res_alloc->guaranteed[t] = 2; 525 res_alloc->guaranteed[t] = 2;
@@ -521,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
521 break; 547 break;
522 } 548 }
523 if (i == RES_MAC || i == RES_VLAN) { 549 if (i == RES_MAC || i == RES_VLAN) {
524 for (j = 0; j < MLX4_MAX_PORTS; j++) 550 for (j = 0; j < dev->caps.num_ports; j++)
525 res_alloc->res_port_rsvd[j] += 551 if (test_bit(j, actv_ports.ports))
526 res_alloc->guaranteed[t]; 552 res_alloc->res_port_rsvd[j] +=
553 res_alloc->guaranteed[t];
527 } else { 554 } else {
528 res_alloc->res_reserved += res_alloc->guaranteed[t]; 555 res_alloc->res_reserved += res_alloc->guaranteed[t];
529 } 556 }
@@ -600,15 +627,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
600 struct mlx4_qp_context *qp_ctx = inbox->buf + 8; 627 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
601 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); 628 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
602 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 629 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
630 int port;
603 631
604 if (MLX4_QP_ST_UD == ts) 632 if (MLX4_QP_ST_UD == ts) {
605 qp_ctx->pri_path.mgid_index = 0x80 | slave; 633 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
606 634 if (mlx4_is_eth(dev, port))
607 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) { 635 qp_ctx->pri_path.mgid_index =
608 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) 636 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
609 qp_ctx->pri_path.mgid_index = slave & 0x7F; 637 else
610 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) 638 qp_ctx->pri_path.mgid_index = slave | 0x80;
611 qp_ctx->alt_path.mgid_index = slave & 0x7F; 639
640 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
641 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
642 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
643 if (mlx4_is_eth(dev, port)) {
644 qp_ctx->pri_path.mgid_index +=
645 mlx4_get_base_gid_ix(dev, slave, port);
646 qp_ctx->pri_path.mgid_index &= 0x7f;
647 } else {
648 qp_ctx->pri_path.mgid_index = slave & 0x7F;
649 }
650 }
651 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
652 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
653 if (mlx4_is_eth(dev, port)) {
654 qp_ctx->alt_path.mgid_index +=
655 mlx4_get_base_gid_ix(dev, slave, port);
656 qp_ctx->alt_path.mgid_index &= 0x7f;
657 } else {
658 qp_ctx->alt_path.mgid_index = slave & 0x7F;
659 }
660 }
612 } 661 }
613} 662}
614 663
@@ -619,7 +668,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
619 struct mlx4_qp_context *qpc = inbox->buf + 8; 668 struct mlx4_qp_context *qpc = inbox->buf + 8;
620 struct mlx4_vport_oper_state *vp_oper; 669 struct mlx4_vport_oper_state *vp_oper;
621 struct mlx4_priv *priv; 670 struct mlx4_priv *priv;
622 u32 qp_type;
623 int port; 671 int port;
624 672
625 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 673 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
@@ -627,12 +675,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
627 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 675 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
628 676
629 if (MLX4_VGT != vp_oper->state.default_vlan) { 677 if (MLX4_VGT != vp_oper->state.default_vlan) {
630 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
631 if (MLX4_QP_ST_RC == qp_type ||
632 (MLX4_QP_ST_UD == qp_type &&
633 !mlx4_is_qp_reserved(dev, qpn)))
634 return -EINVAL;
635
636 /* the reserved QPs (special, proxy, tunnel) 678 /* the reserved QPs (special, proxy, tunnel)
637 * do not operate over vlans 679 * do not operate over vlans
638 */ 680 */
@@ -1659,11 +1701,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1659 return err; 1701 return err;
1660} 1702}
1661 1703
1662static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) 1704static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1705 u8 smac_index, u64 *mac)
1706{
1707 struct mlx4_priv *priv = mlx4_priv(dev);
1708 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1709 struct list_head *mac_list =
1710 &tracker->slave_list[slave].res_list[RES_MAC];
1711 struct mac_res *res, *tmp;
1712
1713 list_for_each_entry_safe(res, tmp, mac_list, list) {
1714 if (res->smac_index == smac_index && res->port == (u8) port) {
1715 *mac = res->mac;
1716 return 0;
1717 }
1718 }
1719 return -ENOENT;
1720}
1721
1722static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1663{ 1723{
1664 struct mlx4_priv *priv = mlx4_priv(dev); 1724 struct mlx4_priv *priv = mlx4_priv(dev);
1665 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1725 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1666 struct mac_res *res; 1726 struct list_head *mac_list =
1727 &tracker->slave_list[slave].res_list[RES_MAC];
1728 struct mac_res *res, *tmp;
1729
1730 list_for_each_entry_safe(res, tmp, mac_list, list) {
1731 if (res->mac == mac && res->port == (u8) port) {
1732 /* mac found. update ref count */
1733 ++res->ref_count;
1734 return 0;
1735 }
1736 }
1667 1737
1668 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) 1738 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1669 return -EINVAL; 1739 return -EINVAL;
@@ -1674,6 +1744,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1674 } 1744 }
1675 res->mac = mac; 1745 res->mac = mac;
1676 res->port = (u8) port; 1746 res->port = (u8) port;
1747 res->smac_index = smac_index;
1748 res->ref_count = 1;
1677 list_add_tail(&res->list, 1749 list_add_tail(&res->list,
1678 &tracker->slave_list[slave].res_list[RES_MAC]); 1750 &tracker->slave_list[slave].res_list[RES_MAC]);
1679 return 0; 1751 return 0;
@@ -1690,9 +1762,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1690 1762
1691 list_for_each_entry_safe(res, tmp, mac_list, list) { 1763 list_for_each_entry_safe(res, tmp, mac_list, list) {
1692 if (res->mac == mac && res->port == (u8) port) { 1764 if (res->mac == mac && res->port == (u8) port) {
1693 list_del(&res->list); 1765 if (!--res->ref_count) {
1694 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1766 list_del(&res->list);
1695 kfree(res); 1767 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1768 kfree(res);
1769 }
1696 break; 1770 break;
1697 } 1771 }
1698 } 1772 }
@@ -1705,10 +1779,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1705 struct list_head *mac_list = 1779 struct list_head *mac_list =
1706 &tracker->slave_list[slave].res_list[RES_MAC]; 1780 &tracker->slave_list[slave].res_list[RES_MAC];
1707 struct mac_res *res, *tmp; 1781 struct mac_res *res, *tmp;
1782 int i;
1708 1783
1709 list_for_each_entry_safe(res, tmp, mac_list, list) { 1784 list_for_each_entry_safe(res, tmp, mac_list, list) {
1710 list_del(&res->list); 1785 list_del(&res->list);
1711 __mlx4_unregister_mac(dev, res->port, res->mac); 1786 /* dereference the mac the num times the slave referenced it */
1787 for (i = 0; i < res->ref_count; i++)
1788 __mlx4_unregister_mac(dev, res->port, res->mac);
1712 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); 1789 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1713 kfree(res); 1790 kfree(res);
1714 } 1791 }
@@ -1720,21 +1797,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1720 int err = -EINVAL; 1797 int err = -EINVAL;
1721 int port; 1798 int port;
1722 u64 mac; 1799 u64 mac;
1800 u8 smac_index;
1723 1801
1724 if (op != RES_OP_RESERVE_AND_MAP) 1802 if (op != RES_OP_RESERVE_AND_MAP)
1725 return err; 1803 return err;
1726 1804
1727 port = !in_port ? get_param_l(out_param) : in_port; 1805 port = !in_port ? get_param_l(out_param) : in_port;
1806 port = mlx4_slave_convert_port(
1807 dev, slave, port);
1808
1809 if (port < 0)
1810 return -EINVAL;
1728 mac = in_param; 1811 mac = in_param;
1729 1812
1730 err = __mlx4_register_mac(dev, port, mac); 1813 err = __mlx4_register_mac(dev, port, mac);
1731 if (err >= 0) { 1814 if (err >= 0) {
1815 smac_index = err;
1732 set_param_l(out_param, err); 1816 set_param_l(out_param, err);
1733 err = 0; 1817 err = 0;
1734 } 1818 }
1735 1819
1736 if (!err) { 1820 if (!err) {
1737 err = mac_add_to_slave(dev, slave, mac, port); 1821 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1738 if (err) 1822 if (err)
1739 __mlx4_unregister_mac(dev, port, mac); 1823 __mlx4_unregister_mac(dev, port, mac);
1740 } 1824 }
@@ -1831,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1831 if (!port || op != RES_OP_RESERVE_AND_MAP) 1915 if (!port || op != RES_OP_RESERVE_AND_MAP)
1832 return -EINVAL; 1916 return -EINVAL;
1833 1917
1918 port = mlx4_slave_convert_port(
1919 dev, slave, port);
1920
1921 if (port < 0)
1922 return -EINVAL;
1834 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ 1923 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1835 if (!in_port && port > 0 && port <= dev->caps.num_ports) { 1924 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1836 slave_state[slave].old_vlan_api = true; 1925 slave_state[slave].old_vlan_api = true;
@@ -2128,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2128 switch (op) { 2217 switch (op) {
2129 case RES_OP_RESERVE_AND_MAP: 2218 case RES_OP_RESERVE_AND_MAP:
2130 port = !in_port ? get_param_l(out_param) : in_port; 2219 port = !in_port ? get_param_l(out_param) : in_port;
2220 port = mlx4_slave_convert_port(
2221 dev, slave, port);
2222
2223 if (port < 0)
2224 return -EINVAL;
2131 mac_del_from_slave(dev, slave, in_param, port); 2225 mac_del_from_slave(dev, slave, in_param, port);
2132 __mlx4_unregister_mac(dev, port, in_param); 2226 __mlx4_unregister_mac(dev, port, in_param);
2133 break; 2227 break;
@@ -2147,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2147 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2241 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2148 int err = 0; 2242 int err = 0;
2149 2243
2244 port = mlx4_slave_convert_port(
2245 dev, slave, port);
2246
2247 if (port < 0)
2248 return -EINVAL;
2150 switch (op) { 2249 switch (op) {
2151 case RES_OP_RESERVE_AND_MAP: 2250 case RES_OP_RESERVE_AND_MAP:
2152 if (slave_state[slave].old_vlan_api) 2251 if (slave_state[slave].old_vlan_api)
@@ -2734,6 +2833,8 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2734 u32 qp_type; 2833 u32 qp_type;
2735 struct mlx4_qp_context *qp_ctx; 2834 struct mlx4_qp_context *qp_ctx;
2736 enum mlx4_qp_optpar optpar; 2835 enum mlx4_qp_optpar optpar;
2836 int port;
2837 int num_gids;
2737 2838
2738 qp_ctx = inbox->buf + 8; 2839 qp_ctx = inbox->buf + 8;
2739 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 2840 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
@@ -2741,6 +2842,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2741 2842
2742 switch (qp_type) { 2843 switch (qp_type) {
2743 case MLX4_QP_ST_RC: 2844 case MLX4_QP_ST_RC:
2845 case MLX4_QP_ST_XRC:
2744 case MLX4_QP_ST_UC: 2846 case MLX4_QP_ST_UC:
2745 switch (transition) { 2847 switch (transition) {
2746 case QP_TRANS_INIT2RTR: 2848 case QP_TRANS_INIT2RTR:
@@ -2749,13 +2851,24 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2749 case QP_TRANS_SQD2SQD: 2851 case QP_TRANS_SQD2SQD:
2750 case QP_TRANS_SQD2RTS: 2852 case QP_TRANS_SQD2RTS:
2751 if (slave != mlx4_master_func_num(dev)) 2853 if (slave != mlx4_master_func_num(dev))
2752 /* slaves have only gid index 0 */ 2854 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2753 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) 2855 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2754 if (qp_ctx->pri_path.mgid_index) 2856 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2857 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2858 else
2859 num_gids = 1;
2860 if (qp_ctx->pri_path.mgid_index >= num_gids)
2755 return -EINVAL; 2861 return -EINVAL;
2756 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) 2862 }
2757 if (qp_ctx->alt_path.mgid_index) 2863 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2864 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2865 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2866 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2867 else
2868 num_gids = 1;
2869 if (qp_ctx->alt_path.mgid_index >= num_gids)
2758 return -EINVAL; 2870 return -EINVAL;
2871 }
2759 break; 2872 break;
2760 default: 2873 default:
2761 break; 2874 break;
@@ -3268,6 +3381,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3268 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3381 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3269} 3382}
3270 3383
3384static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3385 struct mlx4_qp_context *qpc,
3386 struct mlx4_cmd_mailbox *inbox)
3387{
3388 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3389 u8 pri_sched_queue;
3390 int port = mlx4_slave_convert_port(
3391 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3392
3393 if (port < 0)
3394 return -EINVAL;
3395
3396 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3397 ((port & 1) << 6);
3398
3399 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3400 mlx4_is_eth(dev, port + 1)) {
3401 qpc->pri_path.sched_queue = pri_sched_queue;
3402 }
3403
3404 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3405 port = mlx4_slave_convert_port(
3406 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3407 + 1) - 1;
3408 if (port < 0)
3409 return -EINVAL;
3410 qpc->alt_path.sched_queue =
3411 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3412 (port & 1) << 6;
3413 }
3414 return 0;
3415}
3416
3417static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3418 struct mlx4_qp_context *qpc,
3419 struct mlx4_cmd_mailbox *inbox)
3420{
3421 u64 mac;
3422 int port;
3423 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3424 u8 sched = *(u8 *)(inbox->buf + 64);
3425 u8 smac_ix;
3426
3427 port = (sched >> 6 & 1) + 1;
3428 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3429 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3430 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3431 return -ENOENT;
3432 }
3433 return 0;
3434}
3435
3271int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 3436int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3272 struct mlx4_vhcr *vhcr, 3437 struct mlx4_vhcr *vhcr,
3273 struct mlx4_cmd_mailbox *inbox, 3438 struct mlx4_cmd_mailbox *inbox,
@@ -3286,10 +3451,16 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3286 u8 orig_vlan_index = qpc->pri_path.vlan_index; 3451 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3287 u8 orig_feup = qpc->pri_path.feup; 3452 u8 orig_feup = qpc->pri_path.feup;
3288 3453
3454 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3455 if (err)
3456 return err;
3289 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); 3457 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3290 if (err) 3458 if (err)
3291 return err; 3459 return err;
3292 3460
3461 if (roce_verify_mac(dev, slave, qpc, inbox))
3462 return -EINVAL;
3463
3293 update_pkey_index(dev, slave, inbox); 3464 update_pkey_index(dev, slave, inbox);
3294 update_gid(dev, inbox, (u8)slave); 3465 update_gid(dev, inbox, (u8)slave);
3295 adjust_proxy_tun_qkey(dev, vhcr, qpc); 3466 adjust_proxy_tun_qkey(dev, vhcr, qpc);
@@ -3334,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3334 int err; 3505 int err;
3335 struct mlx4_qp_context *context = inbox->buf + 8; 3506 struct mlx4_qp_context *context = inbox->buf + 8;
3336 3507
3508 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3509 if (err)
3510 return err;
3337 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); 3511 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3338 if (err) 3512 if (err)
3339 return err; 3513 return err;
@@ -3353,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3353 int err; 3527 int err;
3354 struct mlx4_qp_context *context = inbox->buf + 8; 3528 struct mlx4_qp_context *context = inbox->buf + 8;
3355 3529
3530 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3531 if (err)
3532 return err;
3356 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); 3533 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3357 if (err) 3534 if (err)
3358 return err; 3535 return err;
@@ -3371,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3371 struct mlx4_cmd_info *cmd) 3548 struct mlx4_cmd_info *cmd)
3372{ 3549{
3373 struct mlx4_qp_context *context = inbox->buf + 8; 3550 struct mlx4_qp_context *context = inbox->buf + 8;
3551 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3552 if (err)
3553 return err;
3374 adjust_proxy_tun_qkey(dev, vhcr, context); 3554 adjust_proxy_tun_qkey(dev, vhcr, context);
3375 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3555 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3376} 3556}
@@ -3384,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3384 int err; 3564 int err;
3385 struct mlx4_qp_context *context = inbox->buf + 8; 3565 struct mlx4_qp_context *context = inbox->buf + 8;
3386 3566
3567 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3568 if (err)
3569 return err;
3387 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); 3570 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3388 if (err) 3571 if (err)
3389 return err; 3572 return err;
@@ -3403,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3403 int err; 3586 int err;
3404 struct mlx4_qp_context *context = inbox->buf + 8; 3587 struct mlx4_qp_context *context = inbox->buf + 8;
3405 3588
3589 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3590 if (err)
3591 return err;
3406 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); 3592 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3407 if (err) 3593 if (err)
3408 return err; 3594 return err;
@@ -3506,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3506 return err; 3692 return err;
3507} 3693}
3508 3694
3509static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 3695static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3510 int block_loopback, enum mlx4_protocol prot, 3696 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3511 enum mlx4_steer_type type, u64 *reg_id) 3697 enum mlx4_steer_type type, u64 *reg_id)
3512{ 3698{
3513 switch (dev->caps.steering_mode) { 3699 switch (dev->caps.steering_mode) {
3514 case MLX4_STEERING_MODE_DEVICE_MANAGED: 3700 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3515 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5], 3701 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3702 if (port < 0)
3703 return port;
3704 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3516 block_loopback, prot, 3705 block_loopback, prot,
3517 reg_id); 3706 reg_id);
3707 }
3518 case MLX4_STEERING_MODE_B0: 3708 case MLX4_STEERING_MODE_B0:
3709 if (prot == MLX4_PROT_ETH) {
3710 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3711 if (port < 0)
3712 return port;
3713 gid[5] = port;
3714 }
3519 return mlx4_qp_attach_common(dev, qp, gid, 3715 return mlx4_qp_attach_common(dev, qp, gid,
3520 block_loopback, prot, type); 3716 block_loopback, prot, type);
3521 default: 3717 default:
@@ -3523,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3523 } 3719 }
3524} 3720}
3525 3721
3526static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 3722static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3527 enum mlx4_protocol prot, enum mlx4_steer_type type, 3723 u8 gid[16], enum mlx4_protocol prot,
3528 u64 reg_id) 3724 enum mlx4_steer_type type, u64 reg_id)
3529{ 3725{
3530 switch (dev->caps.steering_mode) { 3726 switch (dev->caps.steering_mode) {
3531 case MLX4_STEERING_MODE_DEVICE_MANAGED: 3727 case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3562,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3562 3758
3563 qp.qpn = qpn; 3759 qp.qpn = qpn;
3564 if (attach) { 3760 if (attach) {
3565 err = qp_attach(dev, &qp, gid, block_loopback, prot, 3761 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3566 type, &reg_id); 3762 type, &reg_id);
3567 if (err) { 3763 if (err) {
3568 pr_err("Fail to attach rule to qp 0x%x\n", qpn); 3764 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3698,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3698 return -EOPNOTSUPP; 3894 return -EOPNOTSUPP;
3699 3895
3700 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3896 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3897 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
3898 if (ctrl->port <= 0)
3899 return -EINVAL;
3701 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3900 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3702 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3901 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3703 if (err) { 3902 if (err) {
@@ -3816,16 +4015,6 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3816 return err; 4015 return err;
3817} 4016}
3818 4017
3819int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
3820 struct mlx4_vhcr *vhcr,
3821 struct mlx4_cmd_mailbox *inbox,
3822 struct mlx4_cmd_mailbox *outbox,
3823 struct mlx4_cmd_info *cmd)
3824{
3825 return -EPERM;
3826}
3827
3828
3829static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) 4018static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3830{ 4019{
3831 struct res_gid *rgid; 4020 struct res_gid *rgid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23b7e2d35a93..77ac95f052da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
116 struct mlx5_eq_table *table = &dev->priv.eq_table; 116 struct mlx5_eq_table *table = &dev->priv.eq_table;
117 int num_eqs = 1 << dev->caps.log_max_eq; 117 int num_eqs = 1 << dev->caps.log_max_eq;
118 int nvec; 118 int nvec;
119 int err;
120 int i; 119 int i;
121 120
122 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; 121 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
131 for (i = 0; i < nvec; i++) 130 for (i = 0; i < nvec; i++)
132 table->msix_arr[i].entry = i; 131 table->msix_arr[i].entry = i;
133 132
134retry: 133 nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
135 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 134 MLX5_EQ_VEC_COMP_BASE, nvec);
136 err = pci_enable_msix(dev->pdev, table->msix_arr, nvec); 135 if (nvec < 0)
137 if (err <= 0) { 136 return nvec;
138 return err;
139 } else if (err > 2) {
140 nvec = err;
141 goto retry;
142 }
143 137
144 mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec); 138 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
145 139
146 return 0; 140 return 0;
147} 141}
@@ -537,7 +531,6 @@ static int __init init(void)
537 531
538 return 0; 532 return 0;
539 533
540 mlx5_health_cleanup();
541err_debug: 534err_debug:
542 mlx5_unregister_debugfs(); 535 mlx5_unregister_debugfs();
543 return err; 536 return err;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ce84dc289c8f..14ac0e2bc09f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
4832 skb->csum = old->csum; 4832 skb->csum = old->csum;
4833 skb_set_network_header(skb, ETH_HLEN); 4833 skb_set_network_header(skb, ETH_HLEN);
4834 4834
4835 dev_kfree_skb(old); 4835 dev_consume_skb_any(old);
4836} 4836}
4837 4837
4838/** 4838/**
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 68026f7e8ba3..130f6b204efa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
2329 status = 0; 2329 status = 0;
2330 if (myri10ge_msi) { 2330 if (myri10ge_msi) {
2331 if (mgp->num_slices > 1) { 2331 if (mgp->num_slices > 1) {
2332 status = 2332 status = pci_enable_msix_range(pdev, mgp->msix_vectors,
2333 pci_enable_msix(pdev, mgp->msix_vectors, 2333 mgp->num_slices, mgp->num_slices);
2334 mgp->num_slices); 2334 if (status < 0) {
2335 if (status == 0) {
2336 mgp->msix_enabled = 1;
2337 } else {
2338 dev_err(&pdev->dev, 2335 dev_err(&pdev->dev,
2339 "Error %d setting up MSI-X\n", status); 2336 "Error %d setting up MSI-X\n", status);
2340 return status; 2337 return status;
2341 } 2338 }
2339 mgp->msix_enabled = 1;
2342 } 2340 }
2343 if (mgp->msix_enabled == 0) { 2341 if (mgp->msix_enabled == 0) {
2344 status = pci_enable_msi(pdev); 2342 status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3895 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors), 3893 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3896 GFP_KERNEL); 3894 GFP_KERNEL);
3897 if (mgp->msix_vectors == NULL) 3895 if (mgp->msix_vectors == NULL)
3898 goto disable_msix; 3896 goto no_msix;
3899 for (i = 0; i < mgp->num_slices; i++) { 3897 for (i = 0; i < mgp->num_slices; i++) {
3900 mgp->msix_vectors[i].entry = i; 3898 mgp->msix_vectors[i].entry = i;
3901 } 3899 }
3902 3900
3903 while (mgp->num_slices > 1) { 3901 while (mgp->num_slices > 1) {
3904 /* make sure it is a power of two */ 3902 mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
3905 while (!is_power_of_2(mgp->num_slices))
3906 mgp->num_slices--;
3907 if (mgp->num_slices == 1) 3903 if (mgp->num_slices == 1)
3908 goto disable_msix; 3904 goto no_msix;
3909 status = pci_enable_msix(pdev, mgp->msix_vectors, 3905 status = pci_enable_msix_range(pdev,
3910 mgp->num_slices); 3906 mgp->msix_vectors,
3911 if (status == 0) { 3907 mgp->num_slices,
3912 pci_disable_msix(pdev); 3908 mgp->num_slices);
3909 if (status < 0)
3910 goto no_msix;
3911
3912 pci_disable_msix(pdev);
3913
3914 if (status == mgp->num_slices) {
3913 if (old_allocated) 3915 if (old_allocated)
3914 kfree(old_fw); 3916 kfree(old_fw);
3915 return; 3917 return;
3916 } 3918 } else {
3917 if (status > 0)
3918 mgp->num_slices = status; 3919 mgp->num_slices = status;
3919 else 3920 }
3920 goto disable_msix;
3921 } 3921 }
3922 3922
3923disable_msix: 3923no_msix:
3924 if (mgp->msix_vectors != NULL) { 3924 if (mgp->msix_vectors != NULL) {
3925 kfree(mgp->msix_vectors); 3925 kfree(mgp->msix_vectors);
3926 mgp->msix_vectors = NULL; 3926 mgp->msix_vectors = NULL;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd0b2c7..a2844ff322c4 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
2914 struct RxD1 *rxdp1; 2914 struct RxD1 *rxdp1;
2915 struct RxD3 *rxdp3; 2915 struct RxD3 *rxdp3;
2916 2916
2917 if (budget <= 0)
2918 return napi_pkts;
2919
2917 get_info = ring_data->rx_curr_get_info; 2920 get_info = ring_data->rx_curr_get_info;
2918 get_block = get_info.block_index; 2921 get_block = get_info.block_index;
2919 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); 2922 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
@@ -3792,9 +3795,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3792 writeq(rx_mat, &bar0->rx_mat); 3795 writeq(rx_mat, &bar0->rx_mat);
3793 readq(&bar0->rx_mat); 3796 readq(&bar0->rx_mat);
3794 3797
3795 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); 3798 ret = pci_enable_msix_range(nic->pdev, nic->entries,
3799 nic->num_entries, nic->num_entries);
3796 /* We fail init if error or we get less vectors than min required */ 3800 /* We fail init if error or we get less vectors than min required */
3797 if (ret) { 3801 if (ret < 0) {
3798 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); 3802 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3799 kfree(nic->entries); 3803 kfree(nic->entries);
3800 swstats->mem_freed += nic->num_entries * 3804 swstats->mem_freed += nic->num_entries *
@@ -4045,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4045 if (!is_s2io_card_up(sp)) { 4049 if (!is_s2io_card_up(sp)) {
4046 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", 4050 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4047 dev->name); 4051 dev->name);
4048 dev_kfree_skb(skb); 4052 dev_kfree_skb_any(skb);
4049 return NETDEV_TX_OK; 4053 return NETDEV_TX_OK;
4050 } 4054 }
4051 4055
@@ -4118,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4118 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { 4122 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4119 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); 4123 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4120 s2io_stop_tx_queue(sp, fifo->fifo_no); 4124 s2io_stop_tx_queue(sp, fifo->fifo_no);
4121 dev_kfree_skb(skb); 4125 dev_kfree_skb_any(skb);
4122 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4126 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4123 return NETDEV_TX_OK; 4127 return NETDEV_TX_OK;
4124 } 4128 }
@@ -4240,7 +4244,7 @@ pci_map_failed:
4240 swstats->pci_map_fail_cnt++; 4244 swstats->pci_map_fail_cnt++;
4241 s2io_stop_tx_queue(sp, fifo->fifo_no); 4245 s2io_stop_tx_queue(sp, fifo->fifo_no);
4242 swstats->mem_freed += skb->truesize; 4246 swstats->mem_freed += skb->truesize;
4243 dev_kfree_skb(skb); 4247 dev_kfree_skb_any(skb);
4244 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4248 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4245 return NETDEV_TX_OK; 4249 return NETDEV_TX_OK;
4246} 4250}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e46e8698e630..d107bcbb8543 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
368 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 368 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
369 ring->ndev->name, __func__, __LINE__); 369 ring->ndev->name, __func__, __LINE__);
370 370
371 if (ring->budget <= 0)
372 goto out;
373
371 do { 374 do {
372 prefetch((char *)dtr + L1_CACHE_BYTES); 375 prefetch((char *)dtr + L1_CACHE_BYTES);
373 rx_priv = vxge_hw_ring_rxd_private_get(dtr); 376 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
@@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
525 if (first_dtr) 528 if (first_dtr)
526 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr); 529 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
527 530
531out:
528 vxge_debug_entryexit(VXGE_TRACE, 532 vxge_debug_entryexit(VXGE_TRACE,
529 "%s:%d Exiting...", 533 "%s:%d Exiting...",
530 __func__, __LINE__); 534 __func__, __LINE__);
@@ -820,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
820 if (unlikely(skb->len <= 0)) { 824 if (unlikely(skb->len <= 0)) {
821 vxge_debug_tx(VXGE_ERR, 825 vxge_debug_tx(VXGE_ERR,
822 "%s: Buffer has no data..", dev->name); 826 "%s: Buffer has no data..", dev->name);
823 dev_kfree_skb(skb); 827 dev_kfree_skb_any(skb);
824 return NETDEV_TX_OK; 828 return NETDEV_TX_OK;
825 } 829 }
826 830
@@ -829,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
829 if (unlikely(!is_vxge_card_up(vdev))) { 833 if (unlikely(!is_vxge_card_up(vdev))) {
830 vxge_debug_tx(VXGE_ERR, 834 vxge_debug_tx(VXGE_ERR,
831 "%s: vdev not initialized", dev->name); 835 "%s: vdev not initialized", dev->name);
832 dev_kfree_skb(skb); 836 dev_kfree_skb_any(skb);
833 return NETDEV_TX_OK; 837 return NETDEV_TX_OK;
834 } 838 }
835 839
@@ -839,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
839 vxge_debug_tx(VXGE_ERR, 843 vxge_debug_tx(VXGE_ERR,
840 "%s: Failed to store the mac address", 844 "%s: Failed to store the mac address",
841 dev->name); 845 dev->name);
842 dev_kfree_skb(skb); 846 dev_kfree_skb_any(skb);
843 return NETDEV_TX_OK; 847 return NETDEV_TX_OK;
844 } 848 }
845 } 849 }
@@ -986,7 +990,7 @@ _exit1:
986 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 990 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
987_exit0: 991_exit0:
988 netif_tx_stop_queue(fifo->txq); 992 netif_tx_stop_queue(fifo->txq);
989 dev_kfree_skb(skb); 993 dev_kfree_skb_any(skb);
990 994
991 return NETDEV_TX_OK; 995 return NETDEV_TX_OK;
992} 996}
@@ -2349,12 +2353,18 @@ start:
2349 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; 2353 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2350 vdev->vxge_entries[j].in_use = 0; 2354 vdev->vxge_entries[j].in_use = 0;
2351 2355
2352 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); 2356 ret = pci_enable_msix_range(vdev->pdev,
2353 if (ret > 0) { 2357 vdev->entries, 3, vdev->intr_cnt);
2358 if (ret < 0) {
2359 ret = -ENODEV;
2360 goto enable_msix_failed;
2361 } else if (ret < vdev->intr_cnt) {
2362 pci_disable_msix(vdev->pdev);
2363
2354 vxge_debug_init(VXGE_ERR, 2364 vxge_debug_init(VXGE_ERR,
2355 "%s: MSI-X enable failed for %d vectors, ret: %d", 2365 "%s: MSI-X enable failed for %d vectors, ret: %d",
2356 VXGE_DRIVER_NAME, vdev->intr_cnt, ret); 2366 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2357 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { 2367 if (max_config_vpath != VXGE_USE_DEFAULT) {
2358 ret = -ENODEV; 2368 ret = -ENODEV;
2359 goto enable_msix_failed; 2369 goto enable_msix_failed;
2360 } 2370 }
@@ -2368,9 +2378,6 @@ start:
2368 vxge_close_vpaths(vdev, temp); 2378 vxge_close_vpaths(vdev, temp);
2369 vdev->no_of_vpath = temp; 2379 vdev->no_of_vpath = temp;
2370 goto start; 2380 goto start;
2371 } else if (ret < 0) {
2372 ret = -ENODEV;
2373 goto enable_msix_failed;
2374 } 2381 }
2375 return 0; 2382 return 0;
2376 2383
@@ -3131,12 +3138,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3131 u64 packets, bytes, multicast; 3138 u64 packets, bytes, multicast;
3132 3139
3133 do { 3140 do {
3134 start = u64_stats_fetch_begin_bh(&rxstats->syncp); 3141 start = u64_stats_fetch_begin_irq(&rxstats->syncp);
3135 3142
3136 packets = rxstats->rx_frms; 3143 packets = rxstats->rx_frms;
3137 multicast = rxstats->rx_mcast; 3144 multicast = rxstats->rx_mcast;
3138 bytes = rxstats->rx_bytes; 3145 bytes = rxstats->rx_bytes;
3139 } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start)); 3146 } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
3140 3147
3141 net_stats->rx_packets += packets; 3148 net_stats->rx_packets += packets;
3142 net_stats->rx_bytes += bytes; 3149 net_stats->rx_bytes += bytes;
@@ -3146,11 +3153,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3146 net_stats->rx_dropped += rxstats->rx_dropped; 3153 net_stats->rx_dropped += rxstats->rx_dropped;
3147 3154
3148 do { 3155 do {
3149 start = u64_stats_fetch_begin_bh(&txstats->syncp); 3156 start = u64_stats_fetch_begin_irq(&txstats->syncp);
3150 3157
3151 packets = txstats->tx_frms; 3158 packets = txstats->tx_frms;
3152 bytes = txstats->tx_bytes; 3159 bytes = txstats->tx_bytes;
3153 } while (u64_stats_fetch_retry_bh(&txstats->syncp, start)); 3160 } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
3154 3161
3155 net_stats->tx_packets += packets; 3162 net_stats->tx_packets += packets;
3156 net_stats->tx_bytes += bytes; 3163 net_stats->tx_bytes += bytes;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 70cf97fe67f2..fddb464aeab3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1753 1753
1754 /* software stats */ 1754 /* software stats */
1755 do { 1755 do {
1756 syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); 1756 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1757 storage->rx_packets = np->stat_rx_packets; 1757 storage->rx_packets = np->stat_rx_packets;
1758 storage->rx_bytes = np->stat_rx_bytes; 1758 storage->rx_bytes = np->stat_rx_bytes;
1759 storage->rx_dropped = np->stat_rx_dropped; 1759 storage->rx_dropped = np->stat_rx_dropped;
1760 storage->rx_missed_errors = np->stat_rx_missed_errors; 1760 storage->rx_missed_errors = np->stat_rx_missed_errors;
1761 } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); 1761 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1762 1762
1763 do { 1763 do {
1764 syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); 1764 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1765 storage->tx_packets = np->stat_tx_packets; 1765 storage->tx_packets = np->stat_tx_packets;
1766 storage->tx_bytes = np->stat_tx_bytes; 1766 storage->tx_bytes = np->stat_tx_bytes;
1767 storage->tx_dropped = np->stat_tx_dropped; 1767 storage->tx_dropped = np->stat_tx_dropped;
1768 } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); 1768 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1769 1769
1770 /* If the nic supports hw counters then retrieve latest values */ 1770 /* If the nic supports hw counters then retrieve latest values */
1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) { 1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2231 if (pci_dma_mapping_error(np->pci_dev, 2231 if (pci_dma_mapping_error(np->pci_dev,
2232 np->put_tx_ctx->dma)) { 2232 np->put_tx_ctx->dma)) {
2233 /* on DMA mapping error - drop the packet */ 2233 /* on DMA mapping error - drop the packet */
2234 kfree_skb(skb); 2234 dev_kfree_skb_any(skb);
2235 u64_stats_update_begin(&np->swstats_tx_syncp); 2235 u64_stats_update_begin(&np->swstats_tx_syncp);
2236 np->stat_tx_dropped++; 2236 np->stat_tx_dropped++;
2237 u64_stats_update_end(&np->swstats_tx_syncp); 2237 u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2278 tmp_tx_ctx = np->first_tx_ctx; 2278 tmp_tx_ctx = np->first_tx_ctx;
2279 } while (tmp_tx_ctx != np->put_tx_ctx); 2279 } while (tmp_tx_ctx != np->put_tx_ctx);
2280 kfree_skb(skb); 2280 dev_kfree_skb_any(skb);
2281 np->put_tx_ctx = start_tx_ctx; 2281 np->put_tx_ctx = start_tx_ctx;
2282 u64_stats_update_begin(&np->swstats_tx_syncp); 2282 u64_stats_update_begin(&np->swstats_tx_syncp);
2283 np->stat_tx_dropped++; 2283 np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2380 if (pci_dma_mapping_error(np->pci_dev, 2380 if (pci_dma_mapping_error(np->pci_dev,
2381 np->put_tx_ctx->dma)) { 2381 np->put_tx_ctx->dma)) {
2382 /* on DMA mapping error - drop the packet */ 2382 /* on DMA mapping error - drop the packet */
2383 kfree_skb(skb); 2383 dev_kfree_skb_any(skb);
2384 u64_stats_update_begin(&np->swstats_tx_syncp); 2384 u64_stats_update_begin(&np->swstats_tx_syncp);
2385 np->stat_tx_dropped++; 2385 np->stat_tx_dropped++;
2386 u64_stats_update_end(&np->swstats_tx_syncp); 2386 u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2428 tmp_tx_ctx = np->first_tx_ctx; 2428 tmp_tx_ctx = np->first_tx_ctx;
2429 } while (tmp_tx_ctx != np->put_tx_ctx); 2429 } while (tmp_tx_ctx != np->put_tx_ctx);
2430 kfree_skb(skb); 2430 dev_kfree_skb_any(skb);
2431 np->put_tx_ctx = start_tx_ctx; 2431 np->put_tx_ctx = start_tx_ctx;
2432 u64_stats_update_begin(&np->swstats_tx_syncp); 2432 u64_stats_update_begin(&np->swstats_tx_syncp);
2433 np->stat_tx_dropped++; 2433 np->stat_tx_dropped++;
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3930{ 3930{
3931 struct fe_priv *np = get_nvpriv(dev); 3931 struct fe_priv *np = get_nvpriv(dev);
3932 u8 __iomem *base = get_hwbase(dev); 3932 u8 __iomem *base = get_hwbase(dev);
3933 int ret = 1; 3933 int ret;
3934 int i; 3934 int i;
3935 irqreturn_t (*handler)(int foo, void *data); 3935 irqreturn_t (*handler)(int foo, void *data);
3936 3936
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3946 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3946 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3947 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) 3947 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3948 np->msi_x_entry[i].entry = i; 3948 np->msi_x_entry[i].entry = i;
3949 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK)); 3949 ret = pci_enable_msix_range(np->pci_dev,
3950 if (ret == 0) { 3950 np->msi_x_entry,
3951 np->msi_flags & NV_MSI_X_VECTORS_MASK,
3952 np->msi_flags & NV_MSI_X_VECTORS_MASK);
3953 if (ret > 0) {
3951 np->msi_flags |= NV_MSI_X_ENABLED; 3954 np->msi_flags |= NV_MSI_X_ENABLED;
3952 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3955 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3953 /* Request irq for rx handling */ 3956 /* Request irq for rx handling */
3954 sprintf(np->name_rx, "%s-rx", dev->name); 3957 sprintf(np->name_rx, "%s-rx", dev->name);
3955 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3958 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3956 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3959 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3960 if (ret) {
3957 netdev_info(dev, 3961 netdev_info(dev,
3958 "request_irq failed for rx %d\n", 3962 "request_irq failed for rx %d\n",
3959 ret); 3963 ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3963 } 3967 }
3964 /* Request irq for tx handling */ 3968 /* Request irq for tx handling */
3965 sprintf(np->name_tx, "%s-tx", dev->name); 3969 sprintf(np->name_tx, "%s-tx", dev->name);
3966 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3970 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3967 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3971 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3972 if (ret) {
3968 netdev_info(dev, 3973 netdev_info(dev,
3969 "request_irq failed for tx %d\n", 3974 "request_irq failed for tx %d\n",
3970 ret); 3975 ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3974 } 3979 }
3975 /* Request irq for link and timer handling */ 3980 /* Request irq for link and timer handling */
3976 sprintf(np->name_other, "%s-other", dev->name); 3981 sprintf(np->name_other, "%s-other", dev->name);
3977 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3982 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3978 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3983 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3984 if (ret) {
3979 netdev_info(dev, 3985 netdev_info(dev,
3980 "request_irq failed for link %d\n", 3986 "request_irq failed for link %d\n",
3981 ret); 3987 ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3991 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3997 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3992 } else { 3998 } else {
3993 /* Request irq for all interrupts */ 3999 /* Request irq for all interrupts */
3994 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 4000 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4001 handler, IRQF_SHARED, dev->name, dev);
4002 if (ret) {
3995 netdev_info(dev, 4003 netdev_info(dev,
3996 "request_irq failed %d\n", 4004 "request_irq failed %d\n",
3997 ret); 4005 ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4005 writel(0, base + NvRegMSIXMap1); 4013 writel(0, base + NvRegMSIXMap1);
4006 } 4014 }
4007 netdev_info(dev, "MSI-X enabled\n"); 4015 netdev_info(dev, "MSI-X enabled\n");
4016 return 0;
4008 } 4017 }
4009 } 4018 }
4010 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 4019 if (np->msi_flags & NV_MSI_CAPABLE) {
4011 ret = pci_enable_msi(np->pci_dev); 4020 ret = pci_enable_msi(np->pci_dev);
4012 if (ret == 0) { 4021 if (ret == 0) {
4013 np->msi_flags |= NV_MSI_ENABLED; 4022 np->msi_flags |= NV_MSI_ENABLED;
4014 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 4023 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4024 if (ret) {
4015 netdev_info(dev, "request_irq failed %d\n", 4025 netdev_info(dev, "request_irq failed %d\n",
4016 ret); 4026 ret);
4017 pci_disable_msi(np->pci_dev); 4027 pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4025 /* enable msi vector 0 */ 4035 /* enable msi vector 0 */
4026 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 4036 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4027 netdev_info(dev, "MSI enabled\n"); 4037 netdev_info(dev, "MSI enabled\n");
4038 return 0;
4028 } 4039 }
4029 } 4040 }
4030 if (ret != 0) {
4031 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4032 goto out_err;
4033 4041
4034 } 4042 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4043 goto out_err;
4035 4044
4036 return 0; 4045 return 0;
4037out_free_tx: 4046out_free_tx:
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 464e91058c81..73e66838cfef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -120,10 +120,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
120 int data); 120 int data);
121static void pch_gbe_set_multi(struct net_device *netdev); 121static void pch_gbe_set_multi(struct net_device *netdev);
122 122
123static struct sock_filter ptp_filter[] = {
124 PTP_FILTER
125};
126
127static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) 123static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
128{ 124{
129 u8 *data = skb->data; 125 u8 *data = skb->data;
@@ -131,7 +127,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
131 u16 *hi, *id; 127 u16 *hi, *id;
132 u32 lo; 128 u32 lo;
133 129
134 if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE) 130 if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
135 return 0; 131 return 0;
136 132
137 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 133 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -2635,11 +2631,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2635 2631
2636 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2632 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2637 PCI_DEVFN(12, 4)); 2633 PCI_DEVFN(12, 4));
2638 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2639 dev_err(&pdev->dev, "Bad ptp filter\n");
2640 ret = -EINVAL;
2641 goto err_free_netdev;
2642 }
2643 2634
2644 netdev->netdev_ops = &pch_gbe_netdev_ops; 2635 netdev->netdev_ops = &pch_gbe_netdev_ops;
2645 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2636 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f59e6be4a66e..c14bd3116e45 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -56,6 +56,16 @@ config QLCNIC_DCB
56 mode of DCB is supported. PG and PFC values are related only 56 mode of DCB is supported. PG and PFC values are related only
57 to Tx. 57 to Tx.
58 58
59config QLCNIC_VXLAN
60 bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
61 default n
62 depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
63 ---help---
64 This enables hardware offload support for VXLAN protocol over QLogic's
65 84XX series adapters.
66 Say Y here if you want to enable hardware offload support for
67 Virtual eXtensible Local Area Network (VXLAN) in the driver.
68
59config QLGE 69config QLGE
60 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 70 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
61 depends on PCI 71 depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 70849dea32b1..f09c35d669b3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
643 643
644 if (adapter->msix_supported) { 644 if (adapter->msix_supported) {
645 netxen_init_msix_entries(adapter, num_msix); 645 netxen_init_msix_entries(adapter, num_msix);
646 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 646 err = pci_enable_msix_range(pdev, adapter->msix_entries,
647 if (err == 0) { 647 num_msix, num_msix);
648 if (err > 0) {
648 adapter->flags |= NETXEN_NIC_MSIX_ENABLED; 649 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
649 netxen_set_msix_bit(pdev, 1); 650 netxen_set_msix_bit(pdev, 1);
650 651
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f19f81cde134..f31bb5e9d8a9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 3 40#define _QLCNIC_LINUX_MINOR 3
41#define _QLCNIC_LINUX_SUBVERSION 55 41#define _QLCNIC_LINUX_SUBVERSION 57
42#define QLCNIC_LINUX_VERSIONID "5.3.55" 42#define QLCNIC_LINUX_VERSIONID "5.3.57"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -169,11 +169,20 @@ struct cmd_desc_type0 {
169 169
170 __le64 addr_buffer2; 170 __le64 addr_buffer2;
171 171
172 __le16 reference_handle; 172 __le16 encap_descr; /* 15:10 offset of outer L3 header,
173 * 9:6 number of 32bit words in outer L3 header,
174 * 5 offload outer L4 checksum,
175 * 4 offload outer L3 checksum,
176 * 3 Inner L4 type, TCP=0, UDP=1,
177 * 2 Inner L3 type, IPv4=0, IPv6=1,
178 * 1 Outer L3 type,IPv4=0, IPv6=1,
179 * 0 type of encapsulation, GRE=0, VXLAN=1
180 */
173 __le16 mss; 181 __le16 mss;
174 u8 port_ctxid; /* 7:4 ctxid 3:0 port */ 182 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
175 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ 183 u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
176 __le16 conn_id; /* IPSec offoad only */ 184 u8 outer_hdr_length; /* Encapsulation only */
185 u8 rsvd1;
177 186
178 __le64 addr_buffer3; 187 __le64 addr_buffer3;
179 __le64 addr_buffer1; 188 __le64 addr_buffer1;
@@ -183,7 +192,9 @@ struct cmd_desc_type0 {
183 __le64 addr_buffer4; 192 __le64 addr_buffer4;
184 193
185 u8 eth_addr[ETH_ALEN]; 194 u8 eth_addr[ETH_ALEN];
186 __le16 vlan_TCI; 195 __le16 vlan_TCI; /* In case of encapsulation,
196 * this is for outer VLAN
197 */
187 198
188} __attribute__ ((aligned(64))); 199} __attribute__ ((aligned(64)));
189 200
@@ -394,7 +405,7 @@ struct qlcnic_nic_intr_coalesce {
394 u32 timer_out; 405 u32 timer_out;
395}; 406};
396 407
397struct qlcnic_dump_template_hdr { 408struct qlcnic_83xx_dump_template_hdr {
398 u32 type; 409 u32 type;
399 u32 offset; 410 u32 offset;
400 u32 size; 411 u32 size;
@@ -411,15 +422,42 @@ struct qlcnic_dump_template_hdr {
411 u32 rsvd[0]; 422 u32 rsvd[0];
412}; 423};
413 424
425struct qlcnic_82xx_dump_template_hdr {
426 u32 type;
427 u32 offset;
428 u32 size;
429 u32 cap_mask;
430 u32 num_entries;
431 u32 version;
432 u32 timestamp;
433 u32 checksum;
434 u32 drv_cap_mask;
435 u32 sys_info[3];
436 u32 saved_state[16];
437 u32 cap_sizes[8];
438 u32 rsvd[7];
439 u32 capabilities;
440 u32 rsvd1[0];
441};
442
414struct qlcnic_fw_dump { 443struct qlcnic_fw_dump {
415 u8 clr; /* flag to indicate if dump is cleared */ 444 u8 clr; /* flag to indicate if dump is cleared */
416 bool enable; /* enable/disable dump */ 445 bool enable; /* enable/disable dump */
417 u32 size; /* total size of the dump */ 446 u32 size; /* total size of the dump */
447 u32 cap_mask; /* Current capture mask */
418 void *data; /* dump data area */ 448 void *data; /* dump data area */
419 struct qlcnic_dump_template_hdr *tmpl_hdr; 449 void *tmpl_hdr;
420 dma_addr_t phys_addr; 450 dma_addr_t phys_addr;
421 void *dma_buffer; 451 void *dma_buffer;
422 bool use_pex_dma; 452 bool use_pex_dma;
453 /* Read only elements which are common between 82xx and 83xx
454 * template header. Update these values immediately after we read
455 * template header from Firmware
456 */
457 u32 tmpl_hdr_size;
458 u32 version;
459 u32 num_entries;
460 u32 offset;
423}; 461};
424 462
425/* 463/*
@@ -497,6 +535,7 @@ struct qlcnic_hardware_context {
497 u8 extend_lb_time; 535 u8 extend_lb_time;
498 u8 phys_port_id[ETH_ALEN]; 536 u8 phys_port_id[ETH_ALEN];
499 u8 lb_mode; 537 u8 lb_mode;
538 u16 vxlan_port;
500}; 539};
501 540
502struct qlcnic_adapter_stats { 541struct qlcnic_adapter_stats {
@@ -511,6 +550,9 @@ struct qlcnic_adapter_stats {
511 u64 txbytes; 550 u64 txbytes;
512 u64 lrobytes; 551 u64 lrobytes;
513 u64 lso_frames; 552 u64 lso_frames;
553 u64 encap_lso_frames;
554 u64 encap_tx_csummed;
555 u64 encap_rx_csummed;
514 u64 xmit_on; 556 u64 xmit_on;
515 u64 xmit_off; 557 u64 xmit_off;
516 u64 skb_alloc_failure; 558 u64 skb_alloc_failure;
@@ -872,6 +914,10 @@ struct qlcnic_mac_vlan_list {
872#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 914#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
873#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9 915#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
874 916
917#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
918#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
919#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
920
875/* module types */ 921/* module types */
876#define LINKEVENT_MODULE_NOT_PRESENT 1 922#define LINKEVENT_MODULE_NOT_PRESENT 1
877#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 923#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
@@ -966,6 +1012,11 @@ struct qlcnic_ipaddr {
966#define QLCNIC_HAS_PHYS_PORT_ID 0x40000 1012#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
967#define QLCNIC_TSS_RSS 0x80000 1013#define QLCNIC_TSS_RSS 0x80000
968 1014
1015#ifdef CONFIG_QLCNIC_VXLAN
1016#define QLCNIC_ADD_VXLAN_PORT 0x100000
1017#define QLCNIC_DEL_VXLAN_PORT 0x200000
1018#endif
1019
969#define QLCNIC_IS_MSI_FAMILY(adapter) \ 1020#define QLCNIC_IS_MSI_FAMILY(adapter) \
970 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 1021 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
971#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 1022#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1769,10 +1820,28 @@ struct qlcnic_hardware_ops {
1769 struct qlcnic_host_tx_ring *); 1820 struct qlcnic_host_tx_ring *);
1770 void (*disable_tx_intr) (struct qlcnic_adapter *, 1821 void (*disable_tx_intr) (struct qlcnic_adapter *,
1771 struct qlcnic_host_tx_ring *); 1822 struct qlcnic_host_tx_ring *);
1823 u32 (*get_saved_state)(void *, u32);
1824 void (*set_saved_state)(void *, u32, u32);
1825 void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
1826 u32 (*get_cap_size)(void *, int);
1827 void (*set_sys_info)(void *, int, u32);
1828 void (*store_cap_mask)(void *, u32);
1772}; 1829};
1773 1830
1774extern struct qlcnic_nic_template qlcnic_vf_ops; 1831extern struct qlcnic_nic_template qlcnic_vf_ops;
1775 1832
1833static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
1834{
1835 return adapter->ahw->extra_capability[0] &
1836 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
1837}
1838
1839static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
1840{
1841 return adapter->ahw->extra_capability[0] &
1842 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
1843}
1844
1776static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) 1845static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1777{ 1846{
1778 return adapter->nic_ops->start_firmware(adapter); 1847 return adapter->nic_ops->start_firmware(adapter);
@@ -2007,6 +2076,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
2007 adapter->ahw->hw_ops->read_phys_port_id(adapter); 2076 adapter->ahw->hw_ops->read_phys_port_id(adapter);
2008} 2077}
2009 2078
2079static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
2080 void *t_hdr, u32 index)
2081{
2082 return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
2083}
2084
2085static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
2086 void *t_hdr, u32 index, u32 value)
2087{
2088 adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
2089}
2090
2091static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
2092 struct qlcnic_fw_dump *fw_dump)
2093{
2094 adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
2095}
2096
2097static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
2098 void *tmpl_hdr, int index)
2099{
2100 return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
2101}
2102
2103static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
2104 void *tmpl_hdr, int idx, u32 value)
2105{
2106 adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
2107}
2108
2109static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
2110 void *tmpl_hdr, u32 mask)
2111{
2112 adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
2113}
2114
2010static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 2115static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
2011 u32 key) 2116 u32 key)
2012{ 2117{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 27c4f131863b..b7cffb46a75d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -77,7 +77,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
77 {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2}, 77 {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
78 {QLCNIC_CMD_GET_LINK_STATUS, 2, 4}, 78 {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
79 {QLCNIC_CMD_IDC_ACK, 5, 1}, 79 {QLCNIC_CMD_IDC_ACK, 5, 1},
80 {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1}, 80 {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
81 {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, 81 {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
82 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, 82 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
83 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, 83 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
@@ -87,6 +87,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
87 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, 87 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
88 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, 88 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
89 {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50}, 89 {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
90 {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
90}; 91};
91 92
92const u32 qlcnic_83xx_ext_reg_tbl[] = { 93const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -203,7 +204,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
203 .disable_sds_intr = qlcnic_83xx_disable_sds_intr, 204 .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
204 .enable_tx_intr = qlcnic_83xx_enable_tx_intr, 205 .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
205 .disable_tx_intr = qlcnic_83xx_disable_tx_intr, 206 .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
206 207 .get_saved_state = qlcnic_83xx_get_saved_state,
208 .set_saved_state = qlcnic_83xx_set_saved_state,
209 .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
210 .get_cap_size = qlcnic_83xx_get_cap_size,
211 .set_sys_info = qlcnic_83xx_set_sys_info,
212 .store_cap_mask = qlcnic_83xx_store_cap_mask,
207}; 213};
208 214
209static struct qlcnic_nic_template qlcnic_83xx_ops = { 215static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f92485ca21d1..88d809c35633 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -308,6 +308,8 @@ struct qlc_83xx_reset {
308#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020 308#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
309 309
310struct qlcnic_adapter; 310struct qlcnic_adapter;
311struct qlcnic_fw_dump;
312
311struct qlc_83xx_idc { 313struct qlc_83xx_idc {
312 int (*state_entry) (struct qlcnic_adapter *); 314 int (*state_entry) (struct qlcnic_adapter *);
313 u64 sec_counter; 315 u64 sec_counter;
@@ -526,8 +528,9 @@ enum qlc_83xx_ext_regs {
526}; 528};
527 529
528/* Initialize/Stop NIC command bit definitions */ 530/* Initialize/Stop NIC command bit definitions */
529#define QLC_REGISTER_DCB_AEN BIT_1
530#define QLC_REGISTER_LB_IDC BIT_0 531#define QLC_REGISTER_LB_IDC BIT_0
532#define QLC_REGISTER_DCB_AEN BIT_1
533#define QLC_83XX_MULTI_TENANCY_INFO BIT_29
531#define QLC_INIT_FW_RESOURCES BIT_31 534#define QLC_INIT_FW_RESOURCES BIT_31
532 535
533/* 83xx funcitons */ 536/* 83xx funcitons */
@@ -650,4 +653,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
650void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *); 653void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
651int qlcnic_83xx_aer_reset(struct qlcnic_adapter *); 654int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
652void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *); 655void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
656u32 qlcnic_83xx_get_saved_state(void *, u32);
657void qlcnic_83xx_set_saved_state(void *, u32, u32);
658void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
659u32 qlcnic_83xx_get_cap_size(void *, int);
660void qlcnic_83xx_set_sys_info(void *, int, u32);
661void qlcnic_83xx_store_cap_mask(void *, u32);
653#endif 662#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 90a2dda351ec..b48737dcd3c5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,10 +1020,99 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
1023#ifdef CONFIG_QLCNIC_VXLAN
1024#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
1025#define QLC_83XX_MATCH_ENCAP_ID BIT_2
1026#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
1027#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
1028
1029#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
1030#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
1031
1032static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
1033{
1034 u16 port = adapter->ahw->vxlan_port;
1035 struct qlcnic_cmd_args cmd;
1036 int ret = 0;
1037
1038 memset(&cmd, 0, sizeof(cmd));
1039
1040 ret = qlcnic_alloc_mbx_args(&cmd, adapter,
1041 QLCNIC_CMD_INIT_NIC_FUNC);
1042 if (ret)
1043 return ret;
1044
1045 cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
1046 cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
1047 QLC_83XX_SET_VXLAN_UDP_DPORT |
1048 QLC_83XX_VXLAN_UDP_DPORT(port);
1049
1050 ret = qlcnic_issue_cmd(adapter, &cmd);
1051 if (ret)
1052 netdev_err(adapter->netdev,
1053 "Failed to set VXLAN port %d in adapter\n",
1054 port);
1055
1056 qlcnic_free_mbx_args(&cmd);
1057
1058 return ret;
1059}
1060
1061static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
1062 bool state)
1063{
1064 u16 vxlan_port = adapter->ahw->vxlan_port;
1065 struct qlcnic_cmd_args cmd;
1066 int ret = 0;
1067
1068 memset(&cmd, 0, sizeof(cmd));
1069
1070 ret = qlcnic_alloc_mbx_args(&cmd, adapter,
1071 QLCNIC_CMD_SET_INGRESS_ENCAP);
1072 if (ret)
1073 return ret;
1074
1075 cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
1076 QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
1077
1078 ret = qlcnic_issue_cmd(adapter, &cmd);
1079 if (ret)
1080 netdev_err(adapter->netdev,
1081 "Failed to %s VXLAN parsing for port %d\n",
1082 state ? "enable" : "disable", vxlan_port);
1083 else
1084 netdev_info(adapter->netdev,
1085 "%s VXLAN parsing for port %d\n",
1086 state ? "Enabled" : "Disabled", vxlan_port);
1087
1088 qlcnic_free_mbx_args(&cmd);
1089
1090 return ret;
1091}
1092#endif
1093
1023static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) 1094static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
1024{ 1095{
1025 if (adapter->fhash.fnum) 1096 if (adapter->fhash.fnum)
1026 qlcnic_prune_lb_filters(adapter); 1097 qlcnic_prune_lb_filters(adapter);
1098
1099#ifdef CONFIG_QLCNIC_VXLAN
1100 if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
1101 if (qlcnic_set_vxlan_port(adapter))
1102 return;
1103
1104 if (qlcnic_set_vxlan_parsing(adapter, true))
1105 return;
1106
1107 adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
1108 } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
1109 if (qlcnic_set_vxlan_parsing(adapter, false))
1110 return;
1111
1112 adapter->ahw->vxlan_port = 0;
1113 adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
1114 }
1115#endif
1027} 1116}
1028 1117
1029/** 1118/**
@@ -1301,7 +1390,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1301 addr = (u64)dest; 1390 addr = (u64)dest;
1302 1391
1303 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1392 ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
1304 (u32 *)p_cache, size / 16); 1393 p_cache, size / 16);
1305 if (ret) { 1394 if (ret) {
1306 dev_err(&adapter->pdev->dev, "MS memory write failed\n"); 1395 dev_err(&adapter->pdev->dev, "MS memory write failed\n");
1307 release_firmware(fw); 1396 release_firmware(fw);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index acee1a5d80c6..5bacf5210aed 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -47,6 +47,12 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
47 {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, 47 {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
48 {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, 48 {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
49 {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, 49 {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
50 {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
51 QLC_OFF(stats.encap_lso_frames)},
52 {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
53 QLC_OFF(stats.encap_tx_csummed)},
54 {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
55 QLC_OFF(stats.encap_rx_csummed)},
50 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), 56 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
51 QLC_OFF(stats.skb_alloc_failure)}, 57 QLC_OFF(stats.skb_alloc_failure)},
52 {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun), 58 {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
@@ -1639,14 +1645,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1639 } 1645 }
1640 1646
1641 if (fw_dump->clr) 1647 if (fw_dump->clr)
1642 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; 1648 dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
1643 else 1649 else
1644 dump->len = 0; 1650 dump->len = 0;
1645 1651
1646 if (!qlcnic_check_fw_dump_state(adapter)) 1652 if (!qlcnic_check_fw_dump_state(adapter))
1647 dump->flag = ETH_FW_DUMP_DISABLE; 1653 dump->flag = ETH_FW_DUMP_DISABLE;
1648 else 1654 else
1649 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; 1655 dump->flag = fw_dump->cap_mask;
1650 1656
1651 dump->version = adapter->fw_version; 1657 dump->version = adapter->fw_version;
1652 return 0; 1658 return 0;
@@ -1671,9 +1677,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1671 netdev_info(netdev, "Dump not available\n"); 1677 netdev_info(netdev, "Dump not available\n");
1672 return -EINVAL; 1678 return -EINVAL;
1673 } 1679 }
1680
1674 /* Copy template header first */ 1681 /* Copy template header first */
1675 copy_sz = fw_dump->tmpl_hdr->size; 1682 copy_sz = fw_dump->tmpl_hdr_size;
1676 hdr_ptr = (u32 *) fw_dump->tmpl_hdr; 1683 hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
1677 data = buffer; 1684 data = buffer;
1678 for (i = 0; i < copy_sz/sizeof(u32); i++) 1685 for (i = 0; i < copy_sz/sizeof(u32); i++)
1679 *data++ = cpu_to_le32(*hdr_ptr++); 1686 *data++ = cpu_to_le32(*hdr_ptr++);
@@ -1681,7 +1688,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1681 /* Copy captured dump data */ 1688 /* Copy captured dump data */
1682 memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); 1689 memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
1683 dump->len = copy_sz + fw_dump->size; 1690 dump->len = copy_sz + fw_dump->size;
1684 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; 1691 dump->flag = fw_dump->cap_mask;
1685 1692
1686 /* Free dump area once data has been captured */ 1693 /* Free dump area once data has been captured */
1687 vfree(fw_dump->data); 1694 vfree(fw_dump->data);
@@ -1703,7 +1710,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
1703 return -EOPNOTSUPP; 1710 return -EOPNOTSUPP;
1704 } 1711 }
1705 1712
1706 fw_dump->tmpl_hdr->drv_cap_mask = mask; 1713 fw_dump->cap_mask = mask;
1714
1715 /* Store new capture mask in template header as well*/
1716 qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
1717
1707 netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask); 1718 netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
1708 return 0; 1719 return 0;
1709} 1720}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 03d18a0be6ce..9f3adf4e70b5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
317int 317int
318qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) 318qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
319{ 319{
320 int timeout = 0; 320 int timeout = 0, err = 0, done = 0;
321 int err = 0;
322 u32 done = 0;
323 321
324 while (!done) { 322 while (!done) {
325 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)), 323 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
@@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
327 if (done == 1) 325 if (done == 1)
328 break; 326 break;
329 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { 327 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
330 dev_err(&adapter->pdev->dev, 328 if (id_reg) {
331 "Failed to acquire sem=%d lock; holdby=%d\n", 329 done = QLCRD32(adapter, id_reg, &err);
332 sem, 330 if (done != -1)
333 id_reg ? QLCRD32(adapter, id_reg, &err) : -1); 331 dev_err(&adapter->pdev->dev,
332 "Failed to acquire sem=%d lock held by=%d\n",
333 sem, done);
334 else
335 dev_err(&adapter->pdev->dev,
336 "Failed to acquire sem=%d lock",
337 sem);
338 } else {
339 dev_err(&adapter->pdev->dev,
340 "Failed to acquire sem=%d lock", sem);
341 }
334 return -EIO; 342 return -EIO;
335 } 343 }
336 msleep(1); 344 msleep(1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 63d75617d445..cbe2399c30a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -98,6 +98,7 @@ enum qlcnic_regs {
98#define QLCNIC_CMD_GET_LINK_EVENT 0x48 98#define QLCNIC_CMD_GET_LINK_EVENT 0x48
99#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49 99#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
100#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A 100#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
101#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E
101#define QLCNIC_CMD_INIT_NIC_FUNC 0x60 102#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
102#define QLCNIC_CMD_STOP_NIC_FUNC 0x61 103#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
103#define QLCNIC_CMD_IDC_ACK 0x63 104#define QLCNIC_CMD_IDC_ACK 0x63
@@ -161,6 +162,7 @@ struct qlcnic_host_sds_ring;
161struct qlcnic_host_tx_ring; 162struct qlcnic_host_tx_ring;
162struct qlcnic_hardware_context; 163struct qlcnic_hardware_context;
163struct qlcnic_adapter; 164struct qlcnic_adapter;
165struct qlcnic_fw_dump;
164 166
165int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *); 167int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
166int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); 168int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
@@ -213,4 +215,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *);
213int qlcnic_82xx_resume(struct qlcnic_adapter *); 215int qlcnic_82xx_resume(struct qlcnic_adapter *);
214void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed); 216void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
215void qlcnic_fw_poll_work(struct work_struct *work); 217void qlcnic_fw_poll_work(struct work_struct *work);
218
219u32 qlcnic_82xx_get_saved_state(void *, u32);
220void qlcnic_82xx_set_saved_state(void *, u32, u32);
221void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
222u32 qlcnic_82xx_get_cap_size(void *, int);
223void qlcnic_82xx_set_sys_info(void *, int, u32);
224void qlcnic_82xx_store_cap_mask(void *, u32);
216#endif /* __QLCNIC_HW_H_ */ 225#endif /* __QLCNIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 54ebf300332a..173b3d12991f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -13,16 +13,19 @@
13 13
14#include "qlcnic.h" 14#include "qlcnic.h"
15 15
16#define TX_ETHER_PKT 0x01 16#define QLCNIC_TX_ETHER_PKT 0x01
17#define TX_TCP_PKT 0x02 17#define QLCNIC_TX_TCP_PKT 0x02
18#define TX_UDP_PKT 0x03 18#define QLCNIC_TX_UDP_PKT 0x03
19#define TX_IP_PKT 0x04 19#define QLCNIC_TX_IP_PKT 0x04
20#define TX_TCP_LSO 0x05 20#define QLCNIC_TX_TCP_LSO 0x05
21#define TX_TCP_LSO6 0x06 21#define QLCNIC_TX_TCP_LSO6 0x06
22#define TX_TCPV6_PKT 0x0b 22#define QLCNIC_TX_ENCAP_PKT 0x07
23#define TX_UDPV6_PKT 0x0c 23#define QLCNIC_TX_ENCAP_LSO 0x08
24#define FLAGS_VLAN_TAGGED 0x10 24#define QLCNIC_TX_TCPV6_PKT 0x0b
25#define FLAGS_VLAN_OOB 0x40 25#define QLCNIC_TX_UDPV6_PKT 0x0c
26
27#define QLCNIC_FLAGS_VLAN_TAGGED 0x10
28#define QLCNIC_FLAGS_VLAN_OOB 0x40
26 29
27#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \ 30#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
28 (cmd_desc)->vlan_TCI = cpu_to_le16(v); 31 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
@@ -364,6 +367,101 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
364 spin_unlock(&adapter->mac_learn_lock); 367 spin_unlock(&adapter->mac_learn_lock);
365} 368}
366 369
370#define QLCNIC_ENCAP_VXLAN_PKT BIT_0
371#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1
372#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2
373#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3
374#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4
375#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5
376
377static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
378 struct cmd_desc_type0 *first_desc,
379 struct sk_buff *skb,
380 struct qlcnic_host_tx_ring *tx_ring)
381{
382 u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
383 int copied, copy_len, descr_size;
384 u32 producer = tx_ring->producer;
385 struct cmd_desc_type0 *hwdesc;
386 u16 flags = 0, encap_descr = 0;
387
388 opcode = QLCNIC_TX_ETHER_PKT;
389 encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
390
391 if (skb_is_gso(skb)) {
392 inner_hdr_len = skb_inner_transport_header(skb) +
393 inner_tcp_hdrlen(skb) -
394 skb_inner_mac_header(skb);
395
396 /* VXLAN header size = 8 */
397 outer_hdr_len = skb_transport_offset(skb) + 8 +
398 sizeof(struct udphdr);
399 first_desc->outer_hdr_length = outer_hdr_len;
400 total_hdr_len = inner_hdr_len + outer_hdr_len;
401 encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
402 QLCNIC_ENCAP_DO_L4_CSUM;
403 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
404 first_desc->hdr_length = inner_hdr_len;
405
406 /* Copy inner and outer headers in Tx descriptor(s)
407 * If total_hdr_len > cmd_desc_type0, use multiple
408 * descriptors
409 */
410 copied = 0;
411 descr_size = (int)sizeof(struct cmd_desc_type0);
412 while (copied < total_hdr_len) {
413 copy_len = min(descr_size, (total_hdr_len - copied));
414 hwdesc = &tx_ring->desc_head[producer];
415 tx_ring->cmd_buf_arr[producer].skb = NULL;
416 skb_copy_from_linear_data_offset(skb, copied,
417 (char *)hwdesc,
418 copy_len);
419 copied += copy_len;
420 producer = get_next_index(producer, tx_ring->num_desc);
421 }
422
423 tx_ring->producer = producer;
424
425 /* Make sure updated tx_ring->producer is visible
426 * for qlcnic_tx_avail()
427 */
428 smp_mb();
429 adapter->stats.encap_lso_frames++;
430
431 opcode = QLCNIC_TX_ENCAP_LSO;
432 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
433 if (inner_ip_hdr(skb)->version == 6) {
434 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
435 encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
436 } else {
437 if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
438 encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
439 }
440
441 adapter->stats.encap_tx_csummed++;
442 opcode = QLCNIC_TX_ENCAP_PKT;
443 }
444
445 /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
446 if (ip_hdr(skb)->version == 6)
447 encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
448
449 /* outer IP header's size in 32bit words size*/
450 encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
451
452 /* outer IP header offset */
453 encap_descr |= skb_network_offset(skb) << 10;
454 first_desc->encap_descr = cpu_to_le16(encap_descr);
455
456 first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
457 skb->data;
458 first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
459
460 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
461
462 return 0;
463}
464
367static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, 465static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
368 struct cmd_desc_type0 *first_desc, struct sk_buff *skb, 466 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
369 struct qlcnic_host_tx_ring *tx_ring) 467 struct qlcnic_host_tx_ring *tx_ring)
@@ -378,11 +476,11 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
378 476
379 if (protocol == ETH_P_8021Q) { 477 if (protocol == ETH_P_8021Q) {
380 vh = (struct vlan_ethhdr *)skb->data; 478 vh = (struct vlan_ethhdr *)skb->data;
381 flags = FLAGS_VLAN_TAGGED; 479 flags = QLCNIC_FLAGS_VLAN_TAGGED;
382 vlan_tci = ntohs(vh->h_vlan_TCI); 480 vlan_tci = ntohs(vh->h_vlan_TCI);
383 protocol = ntohs(vh->h_vlan_encapsulated_proto); 481 protocol = ntohs(vh->h_vlan_encapsulated_proto);
384 } else if (vlan_tx_tag_present(skb)) { 482 } else if (vlan_tx_tag_present(skb)) {
385 flags = FLAGS_VLAN_OOB; 483 flags = QLCNIC_FLAGS_VLAN_OOB;
386 vlan_tci = vlan_tx_tag_get(skb); 484 vlan_tci = vlan_tx_tag_get(skb);
387 } 485 }
388 if (unlikely(adapter->tx_pvid)) { 486 if (unlikely(adapter->tx_pvid)) {
@@ -391,7 +489,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
391 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) 489 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
392 goto set_flags; 490 goto set_flags;
393 491
394 flags = FLAGS_VLAN_OOB; 492 flags = QLCNIC_FLAGS_VLAN_OOB;
395 vlan_tci = adapter->tx_pvid; 493 vlan_tci = adapter->tx_pvid;
396 } 494 }
397set_flags: 495set_flags:
@@ -402,25 +500,26 @@ set_flags:
402 flags |= BIT_0; 500 flags |= BIT_0;
403 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); 501 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
404 } 502 }
405 opcode = TX_ETHER_PKT; 503 opcode = QLCNIC_TX_ETHER_PKT;
406 if (skb_is_gso(skb)) { 504 if (skb_is_gso(skb)) {
407 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 505 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
408 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 506 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
409 first_desc->total_hdr_length = hdr_len; 507 first_desc->hdr_length = hdr_len;
410 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO; 508 opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
509 QLCNIC_TX_TCP_LSO;
411 510
412 /* For LSO, we need to copy the MAC/IP/TCP headers into 511 /* For LSO, we need to copy the MAC/IP/TCP headers into
413 * the descriptor ring */ 512 * the descriptor ring */
414 copied = 0; 513 copied = 0;
415 offset = 2; 514 offset = 2;
416 515
417 if (flags & FLAGS_VLAN_OOB) { 516 if (flags & QLCNIC_FLAGS_VLAN_OOB) {
418 first_desc->total_hdr_length += VLAN_HLEN; 517 first_desc->hdr_length += VLAN_HLEN;
419 first_desc->tcp_hdr_offset = VLAN_HLEN; 518 first_desc->tcp_hdr_offset = VLAN_HLEN;
420 first_desc->ip_hdr_offset = VLAN_HLEN; 519 first_desc->ip_hdr_offset = VLAN_HLEN;
421 520
422 /* Only in case of TSO on vlan device */ 521 /* Only in case of TSO on vlan device */
423 flags |= FLAGS_VLAN_TAGGED; 522 flags |= QLCNIC_FLAGS_VLAN_TAGGED;
424 523
425 /* Create a TSO vlan header template for firmware */ 524 /* Create a TSO vlan header template for firmware */
426 hwdesc = &tx_ring->desc_head[producer]; 525 hwdesc = &tx_ring->desc_head[producer];
@@ -464,16 +563,16 @@ set_flags:
464 l4proto = ip_hdr(skb)->protocol; 563 l4proto = ip_hdr(skb)->protocol;
465 564
466 if (l4proto == IPPROTO_TCP) 565 if (l4proto == IPPROTO_TCP)
467 opcode = TX_TCP_PKT; 566 opcode = QLCNIC_TX_TCP_PKT;
468 else if (l4proto == IPPROTO_UDP) 567 else if (l4proto == IPPROTO_UDP)
469 opcode = TX_UDP_PKT; 568 opcode = QLCNIC_TX_UDP_PKT;
470 } else if (protocol == ETH_P_IPV6) { 569 } else if (protocol == ETH_P_IPV6) {
471 l4proto = ipv6_hdr(skb)->nexthdr; 570 l4proto = ipv6_hdr(skb)->nexthdr;
472 571
473 if (l4proto == IPPROTO_TCP) 572 if (l4proto == IPPROTO_TCP)
474 opcode = TX_TCPV6_PKT; 573 opcode = QLCNIC_TX_TCPV6_PKT;
475 else if (l4proto == IPPROTO_UDP) 574 else if (l4proto == IPPROTO_UDP)
476 opcode = TX_UDPV6_PKT; 575 opcode = QLCNIC_TX_UDPV6_PKT;
477 } 576 }
478 } 577 }
479 first_desc->tcp_hdr_offset += skb_transport_offset(skb); 578 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
@@ -563,6 +662,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
563 struct ethhdr *phdr; 662 struct ethhdr *phdr;
564 int i, k, frag_count, delta = 0; 663 int i, k, frag_count, delta = 0;
565 u32 producer, num_txd; 664 u32 producer, num_txd;
665 u16 protocol;
666 bool l4_is_udp = false;
566 667
567 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 668 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
568 netif_tx_stop_all_queues(netdev); 669 netif_tx_stop_all_queues(netdev);
@@ -653,8 +754,23 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
653 tx_ring->producer = get_next_index(producer, num_txd); 754 tx_ring->producer = get_next_index(producer, num_txd);
654 smp_mb(); 755 smp_mb();
655 756
656 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring))) 757 protocol = ntohs(skb->protocol);
657 goto unwind_buff; 758 if (protocol == ETH_P_IP)
759 l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
760 else if (protocol == ETH_P_IPV6)
761 l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
762
763 /* Check if it is a VXLAN packet */
764 if (!skb->encapsulation || !l4_is_udp ||
765 !qlcnic_encap_tx_offload(adapter)) {
766 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
767 tx_ring)))
768 goto unwind_buff;
769 } else {
770 if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
771 skb, tx_ring)))
772 goto unwind_buff;
773 }
658 774
659 if (adapter->drv_mac_learn) 775 if (adapter->drv_mac_learn)
660 qlcnic_send_filter(adapter, first_desc, skb); 776 qlcnic_send_filter(adapter, first_desc, skb);
@@ -1587,6 +1703,13 @@ static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
1587 return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0; 1703 return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
1588} 1704}
1589 1705
1706#define QLCNIC_ENCAP_LENGTH_MASK 0x7f
1707
1708static inline u8 qlcnic_encap_length(u64 sts_data)
1709{
1710 return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
1711}
1712
1590static struct qlcnic_rx_buffer * 1713static struct qlcnic_rx_buffer *
1591qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, 1714qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1592 struct qlcnic_host_sds_ring *sds_ring, 1715 struct qlcnic_host_sds_ring *sds_ring,
@@ -1637,6 +1760,12 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1637 1760
1638 skb->protocol = eth_type_trans(skb, netdev); 1761 skb->protocol = eth_type_trans(skb, netdev);
1639 1762
1763 if (qlcnic_encap_length(sts_data[1]) &&
1764 skb->ip_summed == CHECKSUM_UNNECESSARY) {
1765 skb->encapsulation = 1;
1766 adapter->stats.encap_rx_csummed++;
1767 }
1768
1640 if (vid != 0xffff) 1769 if (vid != 0xffff)
1641 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1770 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1642 1771
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1222865cfb73..309d05640883 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -21,6 +21,9 @@
21#include <linux/aer.h> 21#include <linux/aer.h>
22#include <linux/log2.h> 22#include <linux/log2.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#ifdef CONFIG_QLCNIC_VXLAN
25#include <net/vxlan.h>
26#endif
24 27
25MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); 28MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
26MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
@@ -90,7 +93,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *);
90static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); 93static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
91static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *, 94static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
92 pci_channel_state_t); 95 pci_channel_state_t);
93
94static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) 96static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
95{ 97{
96 struct qlcnic_hardware_context *ahw = adapter->ahw; 98 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -462,6 +464,37 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
462 return 0; 464 return 0;
463} 465}
464 466
467#ifdef CONFIG_QLCNIC_VXLAN
468static void qlcnic_add_vxlan_port(struct net_device *netdev,
469 sa_family_t sa_family, __be16 port)
470{
471 struct qlcnic_adapter *adapter = netdev_priv(netdev);
472 struct qlcnic_hardware_context *ahw = adapter->ahw;
473
474 /* Adapter supports only one VXLAN port. Use very first port
475 * for enabling offload
476 */
477 if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
478 return;
479
480 ahw->vxlan_port = ntohs(port);
481 adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
482}
483
484static void qlcnic_del_vxlan_port(struct net_device *netdev,
485 sa_family_t sa_family, __be16 port)
486{
487 struct qlcnic_adapter *adapter = netdev_priv(netdev);
488 struct qlcnic_hardware_context *ahw = adapter->ahw;
489
490 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
491 (ahw->vxlan_port != ntohs(port)))
492 return;
493
494 adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
495}
496#endif
497
465static const struct net_device_ops qlcnic_netdev_ops = { 498static const struct net_device_ops qlcnic_netdev_ops = {
466 .ndo_open = qlcnic_open, 499 .ndo_open = qlcnic_open,
467 .ndo_stop = qlcnic_close, 500 .ndo_stop = qlcnic_close,
@@ -480,6 +513,10 @@ static const struct net_device_ops qlcnic_netdev_ops = {
480 .ndo_fdb_del = qlcnic_fdb_del, 513 .ndo_fdb_del = qlcnic_fdb_del,
481 .ndo_fdb_dump = qlcnic_fdb_dump, 514 .ndo_fdb_dump = qlcnic_fdb_dump,
482 .ndo_get_phys_port_id = qlcnic_get_phys_port_id, 515 .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
516#ifdef CONFIG_QLCNIC_VXLAN
517 .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
518 .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
519#endif
483#ifdef CONFIG_NET_POLL_CONTROLLER 520#ifdef CONFIG_NET_POLL_CONTROLLER
484 .ndo_poll_controller = qlcnic_poll_controller, 521 .ndo_poll_controller = qlcnic_poll_controller,
485#endif 522#endif
@@ -561,6 +598,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
561 .disable_sds_intr = qlcnic_82xx_disable_sds_intr, 598 .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
562 .enable_tx_intr = qlcnic_82xx_enable_tx_intr, 599 .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
563 .disable_tx_intr = qlcnic_82xx_disable_tx_intr, 600 .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
601 .get_saved_state = qlcnic_82xx_get_saved_state,
602 .set_saved_state = qlcnic_82xx_set_saved_state,
603 .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
604 .get_cap_size = qlcnic_82xx_get_cap_size,
605 .set_sys_info = qlcnic_82xx_set_sys_info,
606 .store_cap_mask = qlcnic_82xx_store_cap_mask,
564}; 607};
565 608
566static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) 609static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -684,7 +727,7 @@ restore:
684int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) 727int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
685{ 728{
686 struct pci_dev *pdev = adapter->pdev; 729 struct pci_dev *pdev = adapter->pdev;
687 int err = -1, vector; 730 int err, vector;
688 731
689 if (!adapter->msix_entries) { 732 if (!adapter->msix_entries) {
690 adapter->msix_entries = kcalloc(num_msix, 733 adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +744,17 @@ enable_msix:
701 for (vector = 0; vector < num_msix; vector++) 744 for (vector = 0; vector < num_msix; vector++)
702 adapter->msix_entries[vector].entry = vector; 745 adapter->msix_entries[vector].entry = vector;
703 746
704 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 747 err = pci_enable_msix_range(pdev,
705 if (err == 0) { 748 adapter->msix_entries, 1, num_msix);
749
750 if (err == num_msix) {
706 adapter->flags |= QLCNIC_MSIX_ENABLED; 751 adapter->flags |= QLCNIC_MSIX_ENABLED;
707 adapter->ahw->num_msix = num_msix; 752 adapter->ahw->num_msix = num_msix;
708 dev_info(&pdev->dev, "using msi-x interrupts\n"); 753 dev_info(&pdev->dev, "using msi-x interrupts\n");
709 return err; 754 return 0;
710 } else if (err > 0) { 755 } else if (err > 0) {
756 pci_disable_msix(pdev);
757
711 dev_info(&pdev->dev, 758 dev_info(&pdev->dev,
712 "Unable to allocate %d MSI-X vectors, Available vectors %d\n", 759 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
713 num_msix, err); 760 num_msix, err);
@@ -715,12 +762,12 @@ enable_msix:
715 if (qlcnic_82xx_check(adapter)) { 762 if (qlcnic_82xx_check(adapter)) {
716 num_msix = rounddown_pow_of_two(err); 763 num_msix = rounddown_pow_of_two(err);
717 if (err < QLCNIC_82XX_MINIMUM_VECTOR) 764 if (err < QLCNIC_82XX_MINIMUM_VECTOR)
718 return -EIO; 765 return -ENOSPC;
719 } else { 766 } else {
720 num_msix = rounddown_pow_of_two(err - 1); 767 num_msix = rounddown_pow_of_two(err - 1);
721 num_msix += 1; 768 num_msix += 1;
722 if (err < QLCNIC_83XX_MINIMUM_VECTOR) 769 if (err < QLCNIC_83XX_MINIMUM_VECTOR)
723 return -EIO; 770 return -ENOSPC;
724 } 771 }
725 772
726 if (qlcnic_82xx_check(adapter) && 773 if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +794,7 @@ enable_msix:
747 } 794 }
748 } 795 }
749 796
750 return err; 797 return -EIO;
751} 798}
752 799
753static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter) 800static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
@@ -1934,6 +1981,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
1934 1981
1935 qlcnic_create_sysfs_entries(adapter); 1982 qlcnic_create_sysfs_entries(adapter);
1936 1983
1984#ifdef CONFIG_QLCNIC_VXLAN
1985 if (qlcnic_encap_rx_offload(adapter))
1986 vxlan_get_rx_port(netdev);
1987#endif
1988
1937 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; 1989 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1938 return 0; 1990 return 0;
1939 1991
@@ -2196,6 +2248,19 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2196 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 2248 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
2197 netdev->features |= NETIF_F_LRO; 2249 netdev->features |= NETIF_F_LRO;
2198 2250
2251 if (qlcnic_encap_tx_offload(adapter)) {
2252 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
2253
2254 /* encapsulation Tx offload supported by Adapter */
2255 netdev->hw_enc_features = NETIF_F_IP_CSUM |
2256 NETIF_F_GSO_UDP_TUNNEL |
2257 NETIF_F_TSO |
2258 NETIF_F_TSO6;
2259 }
2260
2261 if (qlcnic_encap_rx_offload(adapter))
2262 netdev->hw_enc_features |= NETIF_F_RXCSUM;
2263
2199 netdev->hw_features = netdev->features; 2264 netdev->hw_features = netdev->features;
2200 netdev->priv_flags |= IFF_UNICAST_FLT; 2265 netdev->priv_flags |= IFF_UNICAST_FLT;
2201 netdev->irq = adapter->msix_entries[0].vector; 2266 netdev->irq = adapter->msix_entries[0].vector;
@@ -2442,8 +2507,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2442 if (err) { 2507 if (err) {
2443 switch (err) { 2508 switch (err) {
2444 case -ENOTRECOVERABLE: 2509 case -ENOTRECOVERABLE:
2445 dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n"); 2510 dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
2446 dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n"); 2511 dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
2447 goto err_out_free_hw; 2512 goto err_out_free_hw;
2448 case -ENOMEM: 2513 case -ENOMEM:
2449 dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); 2514 dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962e2ec4..37b979b1266b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode {
211 QLCNIC_DUMP_RDEND = 255 211 QLCNIC_DUMP_RDEND = 255
212}; 212};
213 213
214inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
215{
216 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
217
218 return hdr->saved_state[index];
219}
220
221inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
222 u32 value)
223{
224 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
225
226 hdr->saved_state[index] = value;
227}
228
229void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
230{
231 struct qlcnic_82xx_dump_template_hdr *hdr;
232
233 hdr = fw_dump->tmpl_hdr;
234 fw_dump->tmpl_hdr_size = hdr->size;
235 fw_dump->version = hdr->version;
236 fw_dump->num_entries = hdr->num_entries;
237 fw_dump->offset = hdr->offset;
238
239 hdr->drv_cap_mask = hdr->cap_mask;
240 fw_dump->cap_mask = hdr->cap_mask;
241}
242
243inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
244{
245 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
246
247 return hdr->cap_sizes[index];
248}
249
250void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
251{
252 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
253
254 hdr->sys_info[idx] = value;
255}
256
257void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
258{
259 struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
260
261 hdr->drv_cap_mask = mask;
262}
263
264inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
265{
266 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
267
268 return hdr->saved_state[index];
269}
270
271inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
272 u32 value)
273{
274 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
275
276 hdr->saved_state[index] = value;
277}
278
279void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
280{
281 struct qlcnic_83xx_dump_template_hdr *hdr;
282
283 hdr = fw_dump->tmpl_hdr;
284 fw_dump->tmpl_hdr_size = hdr->size;
285 fw_dump->version = hdr->version;
286 fw_dump->num_entries = hdr->num_entries;
287 fw_dump->offset = hdr->offset;
288
289 hdr->drv_cap_mask = hdr->cap_mask;
290 fw_dump->cap_mask = hdr->cap_mask;
291}
292
293inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
294{
295 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
296
297 return hdr->cap_sizes[index];
298}
299
300void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
301{
302 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
303
304 hdr->sys_info[idx] = value;
305}
306
307void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
308{
309 struct qlcnic_83xx_dump_template_hdr *hdr;
310
311 hdr = tmpl_hdr;
312 hdr->drv_cap_mask = mask;
313}
314
214struct qlcnic_dump_operations { 315struct qlcnic_dump_operations {
215 enum qlcnic_minidump_opcode opcode; 316 enum qlcnic_minidump_opcode opcode;
216 u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *, 317 u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
@@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
238static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, 339static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
239 struct qlcnic_dump_entry *entry, __le32 *buffer) 340 struct qlcnic_dump_entry *entry, __le32 *buffer)
240{ 341{
342 void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
343 struct __ctrl *ctr = &entry->region.ctrl;
241 int i, k, timeout = 0; 344 int i, k, timeout = 0;
242 u32 addr, data; 345 u32 addr, data, temp;
243 u8 no_ops; 346 u8 no_ops;
244 struct __ctrl *ctr = &entry->region.ctrl;
245 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
246 347
247 addr = ctr->addr; 348 addr = ctr->addr;
248 no_ops = ctr->no_ops; 349 no_ops = ctr->no_ops;
@@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
285 } 386 }
286 break; 387 break;
287 case QLCNIC_DUMP_RD_SAVE: 388 case QLCNIC_DUMP_RD_SAVE:
288 if (ctr->index_a) 389 temp = ctr->index_a;
289 addr = t_hdr->saved_state[ctr->index_a]; 390 if (temp)
391 addr = qlcnic_get_saved_state(adapter,
392 hdr,
393 temp);
290 data = qlcnic_ind_rd(adapter, addr); 394 data = qlcnic_ind_rd(adapter, addr);
291 t_hdr->saved_state[ctr->index_v] = data; 395 qlcnic_set_saved_state(adapter, hdr,
396 ctr->index_v, data);
292 break; 397 break;
293 case QLCNIC_DUMP_WRT_SAVED: 398 case QLCNIC_DUMP_WRT_SAVED:
294 if (ctr->index_v) 399 temp = ctr->index_v;
295 data = t_hdr->saved_state[ctr->index_v]; 400 if (temp)
401 data = qlcnic_get_saved_state(adapter,
402 hdr,
403 temp);
296 else 404 else
297 data = ctr->val1; 405 data = ctr->val1;
298 if (ctr->index_a) 406
299 addr = t_hdr->saved_state[ctr->index_a]; 407 temp = ctr->index_a;
408 if (temp)
409 addr = qlcnic_get_saved_state(adapter,
410 hdr,
411 temp);
300 qlcnic_ind_wr(adapter, addr, data); 412 qlcnic_ind_wr(adapter, addr, data);
301 break; 413 break;
302 case QLCNIC_DUMP_MOD_SAVE_ST: 414 case QLCNIC_DUMP_MOD_SAVE_ST:
303 data = t_hdr->saved_state[ctr->index_v]; 415 data = qlcnic_get_saved_state(adapter, hdr,
416 ctr->index_v);
304 data <<= ctr->shl_val; 417 data <<= ctr->shl_val;
305 data >>= ctr->shr_val; 418 data >>= ctr->shr_val;
306 if (ctr->val2) 419 if (ctr->val2)
307 data &= ctr->val2; 420 data &= ctr->val2;
308 data |= ctr->val3; 421 data |= ctr->val3;
309 data += ctr->val1; 422 data += ctr->val1;
310 t_hdr->saved_state[ctr->index_v] = data; 423 qlcnic_set_saved_state(adapter, hdr,
424 ctr->index_v, data);
311 break; 425 break;
312 default: 426 default:
313 dev_info(&adapter->pdev->dev, 427 dev_info(&adapter->pdev->dev,
@@ -544,7 +658,7 @@ out:
544static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, 658static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
545 struct __mem *mem) 659 struct __mem *mem)
546{ 660{
547 struct qlcnic_dump_template_hdr *tmpl_hdr; 661 struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
548 struct device *dev = &adapter->pdev->dev; 662 struct device *dev = &adapter->pdev->dev;
549 u32 dma_no, dma_base_addr, temp_addr; 663 u32 dma_no, dma_base_addr, temp_addr;
550 int i, ret, dma_sts; 664 int i, ret, dma_sts;
@@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
596 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 710 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
597 u32 temp, dma_base_addr, size = 0, read_size = 0; 711 u32 temp, dma_base_addr, size = 0, read_size = 0;
598 struct qlcnic_pex_dma_descriptor *dma_descr; 712 struct qlcnic_pex_dma_descriptor *dma_descr;
599 struct qlcnic_dump_template_hdr *tmpl_hdr; 713 struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
600 struct device *dev = &adapter->pdev->dev; 714 struct device *dev = &adapter->pdev->dev;
601 dma_addr_t dma_phys_addr; 715 dma_addr_t dma_phys_addr;
602 void *dma_buffer; 716 void *dma_buffer;
@@ -938,8 +1052,8 @@ static int
938qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter, 1052qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
939 struct qlcnic_cmd_args *cmd) 1053 struct qlcnic_cmd_args *cmd)
940{ 1054{
941 struct qlcnic_dump_template_hdr tmp_hdr; 1055 struct qlcnic_83xx_dump_template_hdr tmp_hdr;
942 u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32); 1056 u32 size = sizeof(tmp_hdr) / sizeof(u32);
943 int ret = 0; 1057 int ret = 0;
944 1058
945 if (qlcnic_82xx_check(adapter)) 1059 if (qlcnic_82xx_check(adapter))
@@ -1027,17 +1141,19 @@ free_mem:
1027 return err; 1141 return err;
1028} 1142}
1029 1143
1144#define QLCNIC_TEMPLATE_VERSION (0x20001)
1145
1030int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) 1146int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1031{ 1147{
1032 int err;
1033 u32 temp_size = 0;
1034 u32 version, csum, *tmp_buf;
1035 struct qlcnic_hardware_context *ahw; 1148 struct qlcnic_hardware_context *ahw;
1036 struct qlcnic_dump_template_hdr *tmpl_hdr; 1149 struct qlcnic_fw_dump *fw_dump;
1150 u32 version, csum, *tmp_buf;
1037 u8 use_flash_temp = 0; 1151 u8 use_flash_temp = 0;
1152 u32 temp_size = 0;
1153 int err;
1038 1154
1039 ahw = adapter->ahw; 1155 ahw = adapter->ahw;
1040 1156 fw_dump = &ahw->fw_dump;
1041 err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size, 1157 err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
1042 &use_flash_temp); 1158 &use_flash_temp);
1043 if (err) { 1159 if (err) {
@@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1046 return -EIO; 1162 return -EIO;
1047 } 1163 }
1048 1164
1049 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); 1165 fw_dump->tmpl_hdr = vzalloc(temp_size);
1050 if (!ahw->fw_dump.tmpl_hdr) 1166 if (!fw_dump->tmpl_hdr)
1051 return -ENOMEM; 1167 return -ENOMEM;
1052 1168
1053 tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr; 1169 tmp_buf = (u32 *)fw_dump->tmpl_hdr;
1054 if (use_flash_temp) 1170 if (use_flash_temp)
1055 goto flash_temp; 1171 goto flash_temp;
1056 1172
@@ -1065,8 +1181,8 @@ flash_temp:
1065 dev_err(&adapter->pdev->dev, 1181 dev_err(&adapter->pdev->dev,
1066 "Failed to get minidump template header %d\n", 1182 "Failed to get minidump template header %d\n",
1067 err); 1183 err);
1068 vfree(ahw->fw_dump.tmpl_hdr); 1184 vfree(fw_dump->tmpl_hdr);
1069 ahw->fw_dump.tmpl_hdr = NULL; 1185 fw_dump->tmpl_hdr = NULL;
1070 return -EIO; 1186 return -EIO;
1071 } 1187 }
1072 } 1188 }
@@ -1076,21 +1192,22 @@ flash_temp:
1076 if (csum) { 1192 if (csum) {
1077 dev_err(&adapter->pdev->dev, 1193 dev_err(&adapter->pdev->dev,
1078 "Template header checksum validation failed\n"); 1194 "Template header checksum validation failed\n");
1079 vfree(ahw->fw_dump.tmpl_hdr); 1195 vfree(fw_dump->tmpl_hdr);
1080 ahw->fw_dump.tmpl_hdr = NULL; 1196 fw_dump->tmpl_hdr = NULL;
1081 return -EIO; 1197 return -EIO;
1082 } 1198 }
1083 1199
1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr; 1200 qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
1085 tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask; 1201
1086 dev_info(&adapter->pdev->dev, 1202 dev_info(&adapter->pdev->dev,
1087 "Default minidump capture mask 0x%x\n", 1203 "Default minidump capture mask 0x%x\n",
1088 tmpl_hdr->cap_mask); 1204 fw_dump->cap_mask);
1089 1205
1090 if ((tmpl_hdr->version & 0xfffff) >= 0x20001) 1206 if (qlcnic_83xx_check(adapter) &&
1091 ahw->fw_dump.use_pex_dma = true; 1207 (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
1208 fw_dump->use_pex_dma = true;
1092 else 1209 else
1093 ahw->fw_dump.use_pex_dma = false; 1210 fw_dump->use_pex_dma = false;
1094 1211
1095 qlcnic_enable_fw_dump_state(adapter); 1212 qlcnic_enable_fw_dump_state(adapter);
1096 1213
@@ -1099,21 +1216,22 @@ flash_temp:
1099 1216
1100int qlcnic_dump_fw(struct qlcnic_adapter *adapter) 1217int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1101{ 1218{
1102 __le32 *buffer;
1103 u32 ocm_window;
1104 char mesg[64];
1105 char *msg[] = {mesg, NULL};
1106 int i, k, ops_cnt, ops_index, dump_size = 0;
1107 u32 entry_offset, dump, no_entries, buf_offset = 0;
1108 struct qlcnic_dump_entry *entry;
1109 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1219 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1110 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1111 static const struct qlcnic_dump_operations *fw_dump_ops; 1220 static const struct qlcnic_dump_operations *fw_dump_ops;
1221 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
1222 u32 entry_offset, dump, no_entries, buf_offset = 0;
1223 int i, k, ops_cnt, ops_index, dump_size = 0;
1112 struct device *dev = &adapter->pdev->dev; 1224 struct device *dev = &adapter->pdev->dev;
1113 struct qlcnic_hardware_context *ahw; 1225 struct qlcnic_hardware_context *ahw;
1114 void *temp_buffer; 1226 struct qlcnic_dump_entry *entry;
1227 void *temp_buffer, *tmpl_hdr;
1228 u32 ocm_window;
1229 __le32 *buffer;
1230 char mesg[64];
1231 char *msg[] = {mesg, NULL};
1115 1232
1116 ahw = adapter->ahw; 1233 ahw = adapter->ahw;
1234 tmpl_hdr = fw_dump->tmpl_hdr;
1117 1235
1118 /* Return if we don't have firmware dump template header */ 1236 /* Return if we don't have firmware dump template header */
1119 if (!tmpl_hdr) 1237 if (!tmpl_hdr)
@@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1133 netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n"); 1251 netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
1134 /* Calculate the size for dump data area only */ 1252 /* Calculate the size for dump data area only */
1135 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) 1253 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1136 if (i & tmpl_hdr->drv_cap_mask) 1254 if (i & fw_dump->cap_mask)
1137 dump_size += tmpl_hdr->cap_sizes[k]; 1255 dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
1256
1138 if (!dump_size) 1257 if (!dump_size)
1139 return -EIO; 1258 return -EIO;
1140 1259
@@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1144 1263
1145 buffer = fw_dump->data; 1264 buffer = fw_dump->data;
1146 fw_dump->size = dump_size; 1265 fw_dump->size = dump_size;
1147 no_entries = tmpl_hdr->num_entries; 1266 no_entries = fw_dump->num_entries;
1148 entry_offset = tmpl_hdr->offset; 1267 entry_offset = fw_dump->offset;
1149 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; 1268 qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
1150 tmpl_hdr->sys_info[1] = adapter->fw_version; 1269 qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
1151 1270
1152 if (fw_dump->use_pex_dma) { 1271 if (fw_dump->use_pex_dma) {
1153 temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE, 1272 temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
@@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1163 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); 1282 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
1164 fw_dump_ops = qlcnic_fw_dump_ops; 1283 fw_dump_ops = qlcnic_fw_dump_ops;
1165 } else { 1284 } else {
1285 hdr_83xx = tmpl_hdr;
1166 ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops); 1286 ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
1167 fw_dump_ops = qlcnic_83xx_fw_dump_ops; 1287 fw_dump_ops = qlcnic_83xx_fw_dump_ops;
1168 ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func]; 1288 ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
1169 tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window; 1289 hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
1170 tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func; 1290 hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
1171 } 1291 }
1172 1292
1173 for (i = 0; i < no_entries; i++) { 1293 for (i = 0; i < no_entries; i++) {
1174 entry = (void *)tmpl_hdr + entry_offset; 1294 entry = tmpl_hdr + entry_offset;
1175 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) { 1295 if (!(entry->hdr.mask & fw_dump->cap_mask)) {
1176 entry->hdr.flags |= QLCNIC_DUMP_SKIP; 1296 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1177 entry_offset += entry->hdr.offset; 1297 entry_offset += entry->hdr.offset;
1178 continue; 1298 continue;
@@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1209 1329
1210 fw_dump->clr = 1; 1330 fw_dump->clr = 1;
1211 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); 1331 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
1212 dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n", 1332 netdev_info(adapter->netdev,
1213 adapter->netdev->name, fw_dump->size, tmpl_hdr->size); 1333 "Dump data %d bytes captured, template header size %d bytes\n",
1334 fw_dump->size, fw_dump->tmpl_hdr_size);
1214 /* Send a udev event to notify availability of FW dump */ 1335 /* Send a udev event to notify availability of FW dump */
1215 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); 1336 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
1216 1337
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e5277a632671..14f748cbf0de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -15,6 +15,7 @@
15#define QLC_MAC_OPCODE_MASK 0x7 15#define QLC_MAC_OPCODE_MASK 0x7
16#define QLC_VF_FLOOD_BIT BIT_16 16#define QLC_VF_FLOOD_BIT BIT_16
17#define QLC_FLOOD_MODE 0x5 17#define QLC_FLOOD_MODE 0x5
18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
18 19
19static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); 20static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
20 21
@@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
335 return err; 336 return err;
336 337
337 cmd.req.arg[1] = 0x4; 338 cmd.req.arg[1] = 0x4;
338 if (enable) 339 if (enable) {
339 cmd.req.arg[1] |= BIT_16; 340 cmd.req.arg[1] |= BIT_16;
341 if (qlcnic_84xx_check(adapter))
342 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
343 }
340 344
341 err = qlcnic_issue_cmd(adapter, &cmd); 345 err = qlcnic_issue_cmd(adapter, &cmd);
342 if (err) 346 if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 3d64113a35af..448d156c3d08 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
350 return size; 350 return size;
351} 351}
352 352
353static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
354{
355 struct qlcnic_hardware_context *ahw = adapter->ahw;
356 u32 count = 0;
357
358 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
359 return ahw->total_nic_func;
360
361 if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
362 count = QLC_DEFAULT_VNIC_COUNT;
363 else
364 count = ahw->max_vnic_func;
365
366 return count;
367}
368
369int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) 353int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
370{ 354{
371 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
372 int i; 355 int i;
373 356
374 for (i = 0; i < pci_func_count; i++) { 357 for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
375 if (adapter->npars[i].pci_func == pci_func) 358 if (adapter->npars[i].pci_func == pci_func)
376 return i; 359 return i;
377 } 360 }
378 361 return -EINVAL;
379 return -1;
380} 362}
381 363
382static int validate_pm_config(struct qlcnic_adapter *adapter, 364static int validate_pm_config(struct qlcnic_adapter *adapter,
@@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
464{ 446{
465 struct device *dev = container_of(kobj, struct device, kobj); 447 struct device *dev = container_of(kobj, struct device, kobj);
466 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 448 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
467 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
468 struct qlcnic_pm_func_cfg *pm_cfg; 449 struct qlcnic_pm_func_cfg *pm_cfg;
469 int i, pm_cfg_size;
470 u8 pci_func; 450 u8 pci_func;
451 u32 count;
452 int i;
471 453
472 pm_cfg_size = pci_func_count * sizeof(*pm_cfg); 454 memset(buf, 0, size);
473 if (size != pm_cfg_size)
474 return QL_STATUS_INVALID_PARAM;
475
476 memset(buf, 0, pm_cfg_size);
477 pm_cfg = (struct qlcnic_pm_func_cfg *)buf; 455 pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
478 456 count = size / sizeof(struct qlcnic_pm_func_cfg);
479 for (i = 0; i < pci_func_count; i++) { 457 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
480 pci_func = adapter->npars[i].pci_func; 458 pci_func = adapter->npars[i].pci_func;
481 if (!adapter->npars[i].active) 459 if (pci_func >= count) {
460 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
461 __func__, adapter->ahw->total_nic_func, count);
482 continue; 462 continue;
483 463 }
484 if (!adapter->npars[i].eswitch_status) 464 if (!adapter->npars[i].eswitch_status)
485 continue; 465 continue;
486 466
@@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
494static int validate_esw_config(struct qlcnic_adapter *adapter, 474static int validate_esw_config(struct qlcnic_adapter *adapter,
495 struct qlcnic_esw_func_cfg *esw_cfg, int count) 475 struct qlcnic_esw_func_cfg *esw_cfg, int count)
496{ 476{
497 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
498 struct qlcnic_hardware_context *ahw = adapter->ahw; 477 struct qlcnic_hardware_context *ahw = adapter->ahw;
499 int i, ret; 478 int i, ret;
500 u32 op_mode; 479 u32 op_mode;
@@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
507 486
508 for (i = 0; i < count; i++) { 487 for (i = 0; i < count; i++) {
509 pci_func = esw_cfg[i].pci_func; 488 pci_func = esw_cfg[i].pci_func;
510 if (pci_func >= pci_func_count) 489 if (pci_func >= ahw->max_vnic_func)
511 return QL_STATUS_INVALID_PARAM; 490 return QL_STATUS_INVALID_PARAM;
512 491
513 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) 492 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
642{ 621{
643 struct device *dev = container_of(kobj, struct device, kobj); 622 struct device *dev = container_of(kobj, struct device, kobj);
644 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 623 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
645 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
646 struct qlcnic_esw_func_cfg *esw_cfg; 624 struct qlcnic_esw_func_cfg *esw_cfg;
647 size_t esw_cfg_size; 625 u8 pci_func;
648 u8 i, pci_func; 626 u32 count;
649 627 int i;
650 esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
651 if (size != esw_cfg_size)
652 return QL_STATUS_INVALID_PARAM;
653 628
654 memset(buf, 0, esw_cfg_size); 629 memset(buf, 0, size);
655 esw_cfg = (struct qlcnic_esw_func_cfg *)buf; 630 esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
656 631 count = size / sizeof(struct qlcnic_esw_func_cfg);
657 for (i = 0; i < pci_func_count; i++) { 632 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
658 pci_func = adapter->npars[i].pci_func; 633 pci_func = adapter->npars[i].pci_func;
659 if (!adapter->npars[i].active) 634 if (pci_func >= count) {
635 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
636 __func__, adapter->ahw->total_nic_func, count);
660 continue; 637 continue;
661 638 }
662 if (!adapter->npars[i].eswitch_status) 639 if (!adapter->npars[i].eswitch_status)
663 continue; 640 continue;
664 641
@@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
741{ 718{
742 struct device *dev = container_of(kobj, struct device, kobj); 719 struct device *dev = container_of(kobj, struct device, kobj);
743 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
744 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
745 struct qlcnic_npar_func_cfg *np_cfg; 721 struct qlcnic_npar_func_cfg *np_cfg;
746 struct qlcnic_info nic_info; 722 struct qlcnic_info nic_info;
747 size_t np_cfg_size;
748 int i, ret; 723 int i, ret;
749 724 u32 count;
750 np_cfg_size = pci_func_count * sizeof(*np_cfg);
751 if (size != np_cfg_size)
752 return QL_STATUS_INVALID_PARAM;
753 725
754 memset(&nic_info, 0, sizeof(struct qlcnic_info)); 726 memset(&nic_info, 0, sizeof(struct qlcnic_info));
755 memset(buf, 0, np_cfg_size); 727 memset(buf, 0, size);
756 np_cfg = (struct qlcnic_npar_func_cfg *)buf; 728 np_cfg = (struct qlcnic_npar_func_cfg *)buf;
757 729
758 for (i = 0; i < pci_func_count; i++) { 730 count = size / sizeof(struct qlcnic_npar_func_cfg);
731 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
759 if (qlcnic_is_valid_nic_func(adapter, i) < 0) 732 if (qlcnic_is_valid_nic_func(adapter, i) < 0)
760 continue; 733 continue;
734 if (adapter->npars[i].pci_func >= count) {
735 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
736 __func__, adapter->ahw->total_nic_func, count);
737 continue;
738 }
761 ret = qlcnic_get_nic_info(adapter, &nic_info, i); 739 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
762 if (ret) 740 if (ret)
763 return ret; 741 return ret;
@@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
783{ 761{
784 struct device *dev = container_of(kobj, struct device, kobj); 762 struct device *dev = container_of(kobj, struct device, kobj);
785 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 763 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
786 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
787 struct qlcnic_esw_statistics port_stats; 764 struct qlcnic_esw_statistics port_stats;
788 int ret; 765 int ret;
789 766
@@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
793 if (size != sizeof(struct qlcnic_esw_statistics)) 770 if (size != sizeof(struct qlcnic_esw_statistics))
794 return QL_STATUS_INVALID_PARAM; 771 return QL_STATUS_INVALID_PARAM;
795 772
796 if (offset >= pci_func_count) 773 if (offset >= adapter->ahw->max_vnic_func)
797 return QL_STATUS_INVALID_PARAM; 774 return QL_STATUS_INVALID_PARAM;
798 775
799 memset(&port_stats, 0, size); 776 memset(&port_stats, 0, size);
@@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
884 861
885 struct device *dev = container_of(kobj, struct device, kobj); 862 struct device *dev = container_of(kobj, struct device, kobj);
886 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 863 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
887 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
888 int ret; 864 int ret;
889 865
890 if (qlcnic_83xx_check(adapter)) 866 if (qlcnic_83xx_check(adapter))
891 return QLC_STATUS_UNSUPPORTED_CMD; 867 return QLC_STATUS_UNSUPPORTED_CMD;
892 868
893 if (offset >= pci_func_count) 869 if (offset >= adapter->ahw->max_vnic_func)
894 return QL_STATUS_INVALID_PARAM; 870 return QL_STATUS_INVALID_PARAM;
895 871
896 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, 872 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
914{ 890{
915 struct device *dev = container_of(kobj, struct device, kobj); 891 struct device *dev = container_of(kobj, struct device, kobj);
916 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 892 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
917 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
918 struct qlcnic_pci_func_cfg *pci_cfg; 893 struct qlcnic_pci_func_cfg *pci_cfg;
919 struct qlcnic_pci_info *pci_info; 894 struct qlcnic_pci_info *pci_info;
920 size_t pci_cfg_sz;
921 int i, ret; 895 int i, ret;
896 u32 count;
922 897
923 pci_cfg_sz = pci_func_count * sizeof(*pci_cfg); 898 pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
924 if (size != pci_cfg_sz)
925 return QL_STATUS_INVALID_PARAM;
926
927 pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
928 if (!pci_info) 899 if (!pci_info)
929 return -ENOMEM; 900 return -ENOMEM;
930 901
@@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
935 } 906 }
936 907
937 pci_cfg = (struct qlcnic_pci_func_cfg *)buf; 908 pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
938 for (i = 0; i < pci_func_count; i++) { 909 count = size / sizeof(struct qlcnic_pci_func_cfg);
910 for (i = 0; i < count; i++) {
939 pci_cfg[i].pci_func = pci_info[i].id; 911 pci_cfg[i].pci_func = pci_info[i].id;
940 pci_cfg[i].func_type = pci_info[i].type; 912 pci_cfg[i].func_type = pci_info[i].type;
941 pci_cfg[i].func_state = 0; 913 pci_cfg[i].func_state = 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 656c65ddadb4..0a1d76acab81 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,11 +2556,10 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2556 2556
2557 if (skb_is_gso(skb)) { 2557 if (skb_is_gso(skb)) {
2558 int err; 2558 int err;
2559 if (skb_header_cloned(skb)) { 2559
2560 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2560 err = skb_cow_head(skb, 0);
2561 if (err) 2561 if (err < 0)
2562 return err; 2562 return err;
2563 }
2564 2563
2565 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; 2564 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2566 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; 2565 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
@@ -3331,24 +3330,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
3331 for (i = 0; i < qdev->intr_count; i++) 3330 for (i = 0; i < qdev->intr_count; i++)
3332 qdev->msi_x_entry[i].entry = i; 3331 qdev->msi_x_entry[i].entry = i;
3333 3332
3334 /* Loop to get our vectors. We start with 3333 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3335 * what we want and settle for what we get. 3334 1, qdev->intr_count);
3336 */
3337 do {
3338 err = pci_enable_msix(qdev->pdev,
3339 qdev->msi_x_entry, qdev->intr_count);
3340 if (err > 0)
3341 qdev->intr_count = err;
3342 } while (err > 0);
3343
3344 if (err < 0) { 3335 if (err < 0) {
3345 kfree(qdev->msi_x_entry); 3336 kfree(qdev->msi_x_entry);
3346 qdev->msi_x_entry = NULL; 3337 qdev->msi_x_entry = NULL;
3347 netif_warn(qdev, ifup, qdev->ndev, 3338 netif_warn(qdev, ifup, qdev->ndev,
3348 "MSI-X Enable failed, trying MSI.\n"); 3339 "MSI-X Enable failed, trying MSI.\n");
3349 qdev->intr_count = 1;
3350 qlge_irq_type = MSI_IRQ; 3340 qlge_irq_type = MSI_IRQ;
3351 } else if (err == 0) { 3341 } else {
3342 qdev->intr_count = err;
3352 set_bit(QL_MSIX_ENABLED, &qdev->flags); 3343 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3353 netif_info(qdev, ifup, qdev->ndev, 3344 netif_info(qdev, ifup, qdev->ndev,
3354 "MSI-X Enabled, got %d vectors.\n", 3345 "MSI-X Enabled, got %d vectors.\n",
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 819b74cefd64..cd045ecb9816 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -270,11 +270,6 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
270 return r6040_phy_write(ioaddr, phy_addr, reg, value); 270 return r6040_phy_write(ioaddr, phy_addr, reg, value);
271} 271}
272 272
273static int r6040_mdiobus_reset(struct mii_bus *bus)
274{
275 return 0;
276}
277
278static void r6040_free_txbufs(struct net_device *dev) 273static void r6040_free_txbufs(struct net_device *dev)
279{ 274{
280 struct r6040_private *lp = netdev_priv(dev); 275 struct r6040_private *lp = netdev_priv(dev);
@@ -1191,7 +1186,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1191 lp->mii_bus->priv = dev; 1186 lp->mii_bus->priv = dev;
1192 lp->mii_bus->read = r6040_mdiobus_read; 1187 lp->mii_bus->read = r6040_mdiobus_read;
1193 lp->mii_bus->write = r6040_mdiobus_write; 1188 lp->mii_bus->write = r6040_mdiobus_write;
1194 lp->mii_bus->reset = r6040_mdiobus_reset;
1195 lp->mii_bus->name = "r6040_eth_mii"; 1189 lp->mii_bus->name = "r6040_eth_mii";
1196 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 1190 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1197 dev_name(&pdev->dev), card_idx); 1191 dev_name(&pdev->dev), card_idx);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 737c1a881f78..2bc728e65e24 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -476,7 +476,7 @@ rx_status_loop:
476 rx = 0; 476 rx = 0;
477 cpw16(IntrStatus, cp_rx_intr_mask); 477 cpw16(IntrStatus, cp_rx_intr_mask);
478 478
479 while (1) { 479 while (rx < budget) {
480 u32 status, len; 480 u32 status, len;
481 dma_addr_t mapping, new_mapping; 481 dma_addr_t mapping, new_mapping;
482 struct sk_buff *skb, *new_skb; 482 struct sk_buff *skb, *new_skb;
@@ -554,9 +554,6 @@ rx_next:
554 else 554 else
555 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); 555 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
556 rx_tail = NEXT_RX(rx_tail); 556 rx_tail = NEXT_RX(rx_tail);
557
558 if (rx >= budget)
559 break;
560 } 557 }
561 558
562 cp->rx_tail = rx_tail; 559 cp->rx_tail = rx_tail;
@@ -899,7 +896,7 @@ out_unlock:
899 896
900 return NETDEV_TX_OK; 897 return NETDEV_TX_OK;
901out_dma_error: 898out_dma_error:
902 kfree_skb(skb); 899 dev_kfree_skb_any(skb);
903 cp->dev->stats.tx_dropped++; 900 cp->dev->stats.tx_dropped++;
904 goto out_unlock; 901 goto out_unlock;
905} 902}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da5972eefdd2..2e5df148af4c 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
1717 if (len < ETH_ZLEN) 1717 if (len < ETH_ZLEN)
1718 memset(tp->tx_buf[entry], 0, ETH_ZLEN); 1718 memset(tp->tx_buf[entry], 0, ETH_ZLEN);
1719 skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); 1719 skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
1720 dev_kfree_skb(skb); 1720 dev_kfree_skb_any(skb);
1721 } else { 1721 } else {
1722 dev_kfree_skb(skb); 1722 dev_kfree_skb_any(skb);
1723 dev->stats.tx_dropped++; 1723 dev->stats.tx_dropped++;
1724 return NETDEV_TX_OK; 1724 return NETDEV_TX_OK;
1725 } 1725 }
@@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2522 netdev_stats_to_stats64(stats, &dev->stats); 2522 netdev_stats_to_stats64(stats, &dev->stats);
2523 2523
2524 do { 2524 do {
2525 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); 2525 start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
2526 stats->rx_packets = tp->rx_stats.packets; 2526 stats->rx_packets = tp->rx_stats.packets;
2527 stats->rx_bytes = tp->rx_stats.bytes; 2527 stats->rx_bytes = tp->rx_stats.bytes;
2528 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); 2528 } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
2529 2529
2530 do { 2530 do {
2531 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); 2531 start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
2532 stats->tx_packets = tp->tx_stats.packets; 2532 stats->tx_packets = tp->tx_stats.packets;
2533 stats->tx_bytes = tp->tx_stats.bytes; 2533 stats->tx_bytes = tp->tx_stats.bytes;
2534 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); 2534 } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
2535 2535
2536 return stats; 2536 return stats;
2537} 2537}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3ff7bc3e7a23..aa1c079f231d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5834 tp->TxDescArray + entry); 5834 tp->TxDescArray + entry);
5835 if (skb) { 5835 if (skb) {
5836 tp->dev->stats.tx_dropped++; 5836 tp->dev->stats.tx_dropped++;
5837 dev_kfree_skb(skb); 5837 dev_kfree_skb_any(skb);
5838 tx_skb->skb = NULL; 5838 tx_skb->skb = NULL;
5839 } 5839 }
5840 } 5840 }
@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6059err_dma_1: 6059err_dma_1:
6060 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); 6060 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
6061err_dma_0: 6061err_dma_0:
6062 dev_kfree_skb(skb); 6062 dev_kfree_skb_any(skb);
6063err_update_stats: 6063err_update_stats:
6064 dev->stats.tx_dropped++; 6064 dev->stats.tx_dropped++;
6065 return NETDEV_TX_OK; 6065 return NETDEV_TX_OK;
@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
6142 tp->tx_stats.packets++; 6142 tp->tx_stats.packets++;
6143 tp->tx_stats.bytes += tx_skb->skb->len; 6143 tp->tx_stats.bytes += tx_skb->skb->len;
6144 u64_stats_update_end(&tp->tx_stats.syncp); 6144 u64_stats_update_end(&tp->tx_stats.syncp);
6145 dev_kfree_skb(tx_skb->skb); 6145 dev_kfree_skb_any(tx_skb->skb);
6146 tx_skb->skb = NULL; 6146 tx_skb->skb = NULL;
6147 } 6147 }
6148 dirty_tx++; 6148 dirty_tx++;
@@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6590 rtl8169_rx_missed(dev, ioaddr); 6590 rtl8169_rx_missed(dev, ioaddr);
6591 6591
6592 do { 6592 do {
6593 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); 6593 start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
6594 stats->rx_packets = tp->rx_stats.packets; 6594 stats->rx_packets = tp->rx_stats.packets;
6595 stats->rx_bytes = tp->rx_stats.bytes; 6595 stats->rx_bytes = tp->rx_stats.bytes;
6596 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); 6596 } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
6597 6597
6598 6598
6599 do { 6599 do {
6600 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); 6600 start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
6601 stats->tx_packets = tp->tx_stats.packets; 6601 stats->tx_packets = tp->tx_stats.packets;
6602 stats->tx_bytes = tp->tx_stats.bytes; 6602 stats->tx_bytes = tp->tx_stats.bytes;
6603 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); 6603 } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
6604 6604
6605 stats->rx_dropped = dev->stats.rx_dropped; 6605 stats->rx_dropped = dev->stats.rx_dropped;
6606 stats->tx_dropped = dev->stats.tx_dropped; 6606 stats->tx_dropped = dev->stats.tx_dropped;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 040cb94e8219..6a9509ccd33b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,9 @@
1/* SuperH Ethernet device driver 1/* SuperH Ethernet device driver
2 * 2 *
3 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 3 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
4 * Copyright (C) 2008-2013 Renesas Solutions Corp. 4 * Copyright (C) 2008-2014 Renesas Solutions Corp.
5 * Copyright (C) 2013 Cogent Embedded, Inc. 5 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
6 * Copyright (C) 2014 Codethink Limited
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -27,6 +28,10 @@
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/mdio-bitbang.h> 29#include <linux/mdio-bitbang.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
33#include <linux/of_irq.h>
34#include <linux/of_net.h>
30#include <linux/phy.h> 35#include <linux/phy.h>
31#include <linux/cache.h> 36#include <linux/cache.h>
32#include <linux/io.h> 37#include <linux/io.h>
@@ -36,6 +41,7 @@
36#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
37#include <linux/clk.h> 42#include <linux/clk.h>
38#include <linux/sh_eth.h> 43#include <linux/sh_eth.h>
44#include <linux/of_mdio.h>
39 45
40#include "sh_eth.h" 46#include "sh_eth.h"
41 47
@@ -394,7 +400,8 @@ static void sh_eth_select_mii(struct net_device *ndev)
394 value = 0x0; 400 value = 0x0;
395 break; 401 break;
396 default: 402 default:
397 pr_warn("PHY interface mode was not setup. Set to MII.\n"); 403 netdev_warn(ndev,
404 "PHY interface mode was not setup. Set to MII.\n");
398 value = 0x1; 405 value = 0x1;
399 break; 406 break;
400 } 407 }
@@ -848,7 +855,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
848 cnt--; 855 cnt--;
849 } 856 }
850 if (cnt <= 0) { 857 if (cnt <= 0) {
851 pr_err("Device reset failed\n"); 858 netdev_err(ndev, "Device reset failed\n");
852 ret = -ETIMEDOUT; 859 ret = -ETIMEDOUT;
853 } 860 }
854 return ret; 861 return ret;
@@ -866,7 +873,7 @@ static int sh_eth_reset(struct net_device *ndev)
866 873
867 ret = sh_eth_check_reset(ndev); 874 ret = sh_eth_check_reset(ndev);
868 if (ret) 875 if (ret)
869 goto out; 876 return ret;
870 877
871 /* Table Init */ 878 /* Table Init */
872 sh_eth_write(ndev, 0x0, TDLAR); 879 sh_eth_write(ndev, 0x0, TDLAR);
@@ -893,7 +900,6 @@ static int sh_eth_reset(struct net_device *ndev)
893 EDMR); 900 EDMR);
894 } 901 }
895 902
896out:
897 return ret; 903 return ret;
898} 904}
899 905
@@ -1257,7 +1263,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1257 /* Soft Reset */ 1263 /* Soft Reset */
1258 ret = sh_eth_reset(ndev); 1264 ret = sh_eth_reset(ndev);
1259 if (ret) 1265 if (ret)
1260 goto out; 1266 return ret;
1261 1267
1262 if (mdp->cd->rmiimode) 1268 if (mdp->cd->rmiimode)
1263 sh_eth_write(ndev, 0x1, RMIIMODE); 1269 sh_eth_write(ndev, 0x1, RMIIMODE);
@@ -1336,7 +1342,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1336 netif_start_queue(ndev); 1342 netif_start_queue(ndev);
1337 } 1343 }
1338 1344
1339out:
1340 return ret; 1345 return ret;
1341} 1346}
1342 1347
@@ -1550,8 +1555,7 @@ ignore_link:
1550 /* Unused write back interrupt */ 1555 /* Unused write back interrupt */
1551 if (intr_status & EESR_TABT) { /* Transmit Abort int */ 1556 if (intr_status & EESR_TABT) { /* Transmit Abort int */
1552 ndev->stats.tx_aborted_errors++; 1557 ndev->stats.tx_aborted_errors++;
1553 if (netif_msg_tx_err(mdp)) 1558 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1554 dev_err(&ndev->dev, "Transmit Abort\n");
1555 } 1559 }
1556 } 1560 }
1557 1561
@@ -1560,45 +1564,38 @@ ignore_link:
1560 if (intr_status & EESR_RFRMER) { 1564 if (intr_status & EESR_RFRMER) {
1561 /* Receive Frame Overflow int */ 1565 /* Receive Frame Overflow int */
1562 ndev->stats.rx_frame_errors++; 1566 ndev->stats.rx_frame_errors++;
1563 if (netif_msg_rx_err(mdp)) 1567 netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1564 dev_err(&ndev->dev, "Receive Abort\n");
1565 } 1568 }
1566 } 1569 }
1567 1570
1568 if (intr_status & EESR_TDE) { 1571 if (intr_status & EESR_TDE) {
1569 /* Transmit Descriptor Empty int */ 1572 /* Transmit Descriptor Empty int */
1570 ndev->stats.tx_fifo_errors++; 1573 ndev->stats.tx_fifo_errors++;
1571 if (netif_msg_tx_err(mdp)) 1574 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1572 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1573 } 1575 }
1574 1576
1575 if (intr_status & EESR_TFE) { 1577 if (intr_status & EESR_TFE) {
1576 /* FIFO under flow */ 1578 /* FIFO under flow */
1577 ndev->stats.tx_fifo_errors++; 1579 ndev->stats.tx_fifo_errors++;
1578 if (netif_msg_tx_err(mdp)) 1580 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1579 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1580 } 1581 }
1581 1582
1582 if (intr_status & EESR_RDE) { 1583 if (intr_status & EESR_RDE) {
1583 /* Receive Descriptor Empty int */ 1584 /* Receive Descriptor Empty int */
1584 ndev->stats.rx_over_errors++; 1585 ndev->stats.rx_over_errors++;
1585 1586 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1586 if (netif_msg_rx_err(mdp))
1587 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1588 } 1587 }
1589 1588
1590 if (intr_status & EESR_RFE) { 1589 if (intr_status & EESR_RFE) {
1591 /* Receive FIFO Overflow int */ 1590 /* Receive FIFO Overflow int */
1592 ndev->stats.rx_fifo_errors++; 1591 ndev->stats.rx_fifo_errors++;
1593 if (netif_msg_rx_err(mdp)) 1592 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1594 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1595 } 1593 }
1596 1594
1597 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1595 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1598 /* Address Error */ 1596 /* Address Error */
1599 ndev->stats.tx_fifo_errors++; 1597 ndev->stats.tx_fifo_errors++;
1600 if (netif_msg_tx_err(mdp)) 1598 netif_err(mdp, tx_err, ndev, "Address Error\n");
1601 dev_err(&ndev->dev, "Address Error\n");
1602 } 1599 }
1603 1600
1604 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1601 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1609,9 +1606,9 @@ ignore_link:
1609 u32 edtrr = sh_eth_read(ndev, EDTRR); 1606 u32 edtrr = sh_eth_read(ndev, EDTRR);
1610 1607
1611 /* dmesg */ 1608 /* dmesg */
1612 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1609 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1613 intr_status, mdp->cur_tx, mdp->dirty_tx, 1610 intr_status, mdp->cur_tx, mdp->dirty_tx,
1614 (u32)ndev->state, edtrr); 1611 (u32)ndev->state, edtrr);
1615 /* dirty buffer free */ 1612 /* dirty buffer free */
1616 sh_eth_txfree(ndev); 1613 sh_eth_txfree(ndev);
1617 1614
@@ -1656,9 +1653,9 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1656 EESIPR); 1653 EESIPR);
1657 __napi_schedule(&mdp->napi); 1654 __napi_schedule(&mdp->napi);
1658 } else { 1655 } else {
1659 dev_warn(&ndev->dev, 1656 netdev_warn(ndev,
1660 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", 1657 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1661 intr_status, intr_enable); 1658 intr_status, intr_enable);
1662 } 1659 }
1663 } 1660 }
1664 1661
@@ -1757,27 +1754,42 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1757/* PHY init function */ 1754/* PHY init function */
1758static int sh_eth_phy_init(struct net_device *ndev) 1755static int sh_eth_phy_init(struct net_device *ndev)
1759{ 1756{
1757 struct device_node *np = ndev->dev.parent->of_node;
1760 struct sh_eth_private *mdp = netdev_priv(ndev); 1758 struct sh_eth_private *mdp = netdev_priv(ndev);
1761 char phy_id[MII_BUS_ID_SIZE + 3];
1762 struct phy_device *phydev = NULL; 1759 struct phy_device *phydev = NULL;
1763 1760
1764 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1765 mdp->mii_bus->id, mdp->phy_id);
1766
1767 mdp->link = 0; 1761 mdp->link = 0;
1768 mdp->speed = 0; 1762 mdp->speed = 0;
1769 mdp->duplex = -1; 1763 mdp->duplex = -1;
1770 1764
1771 /* Try connect to PHY */ 1765 /* Try connect to PHY */
1772 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1766 if (np) {
1773 mdp->phy_interface); 1767 struct device_node *pn;
1768
1769 pn = of_parse_phandle(np, "phy-handle", 0);
1770 phydev = of_phy_connect(ndev, pn,
1771 sh_eth_adjust_link, 0,
1772 mdp->phy_interface);
1773
1774 if (!phydev)
1775 phydev = ERR_PTR(-ENOENT);
1776 } else {
1777 char phy_id[MII_BUS_ID_SIZE + 3];
1778
1779 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1780 mdp->mii_bus->id, mdp->phy_id);
1781
1782 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1783 mdp->phy_interface);
1784 }
1785
1774 if (IS_ERR(phydev)) { 1786 if (IS_ERR(phydev)) {
1775 dev_err(&ndev->dev, "phy_connect failed\n"); 1787 netdev_err(ndev, "failed to connect PHY\n");
1776 return PTR_ERR(phydev); 1788 return PTR_ERR(phydev);
1777 } 1789 }
1778 1790
1779 dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n", 1791 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1780 phydev->addr, phydev->irq, phydev->drv->name); 1792 phydev->addr, phydev->irq, phydev->drv->name);
1781 1793
1782 mdp->phydev = phydev; 1794 mdp->phydev = phydev;
1783 1795
@@ -1958,12 +1970,12 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
1958 1970
1959 ret = sh_eth_ring_init(ndev); 1971 ret = sh_eth_ring_init(ndev);
1960 if (ret < 0) { 1972 if (ret < 0) {
1961 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__); 1973 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1962 return ret; 1974 return ret;
1963 } 1975 }
1964 ret = sh_eth_dev_init(ndev, false); 1976 ret = sh_eth_dev_init(ndev, false);
1965 if (ret < 0) { 1977 if (ret < 0) {
1966 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__); 1978 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1967 return ret; 1979 return ret;
1968 } 1980 }
1969 1981
@@ -2004,7 +2016,7 @@ static int sh_eth_open(struct net_device *ndev)
2004 ret = request_irq(ndev->irq, sh_eth_interrupt, 2016 ret = request_irq(ndev->irq, sh_eth_interrupt,
2005 mdp->cd->irq_flags, ndev->name, ndev); 2017 mdp->cd->irq_flags, ndev->name, ndev);
2006 if (ret) { 2018 if (ret) {
2007 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 2019 netdev_err(ndev, "Can not assign IRQ number\n");
2008 goto out_napi_off; 2020 goto out_napi_off;
2009 } 2021 }
2010 2022
@@ -2042,10 +2054,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2042 2054
2043 netif_stop_queue(ndev); 2055 netif_stop_queue(ndev);
2044 2056
2045 if (netif_msg_timer(mdp)) { 2057 netif_err(mdp, timer, ndev,
2046 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n", 2058 "transmit timed out, status %8.8x, resetting...\n",
2047 ndev->name, (int)sh_eth_read(ndev, EESR)); 2059 (int)sh_eth_read(ndev, EESR));
2048 }
2049 2060
2050 /* tx_errors count up */ 2061 /* tx_errors count up */
2051 ndev->stats.tx_errors++; 2062 ndev->stats.tx_errors++;
@@ -2080,8 +2091,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2080 spin_lock_irqsave(&mdp->lock, flags); 2091 spin_lock_irqsave(&mdp->lock, flags);
2081 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2092 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2082 if (!sh_eth_txfree(ndev)) { 2093 if (!sh_eth_txfree(ndev)) {
2083 if (netif_msg_tx_queued(mdp)) 2094 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2084 dev_warn(&ndev->dev, "TxFD exhausted.\n");
2085 netif_stop_queue(ndev); 2095 netif_stop_queue(ndev);
2086 spin_unlock_irqrestore(&mdp->lock, flags); 2096 spin_unlock_irqrestore(&mdp->lock, flags);
2087 return NETDEV_TX_BUSY; 2097 return NETDEV_TX_BUSY;
@@ -2098,8 +2108,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2098 skb->len + 2); 2108 skb->len + 2);
2099 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2109 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2100 DMA_TO_DEVICE); 2110 DMA_TO_DEVICE);
2101 if (skb->len < ETHERSMALL) 2111 if (skb->len < ETH_ZLEN)
2102 txdesc->buffer_length = ETHERSMALL; 2112 txdesc->buffer_length = ETH_ZLEN;
2103 else 2113 else
2104 txdesc->buffer_length = skb->len; 2114 txdesc->buffer_length = skb->len;
2105 2115
@@ -2251,7 +2261,7 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
2251 udelay(10); 2261 udelay(10);
2252 timeout--; 2262 timeout--;
2253 if (timeout <= 0) { 2263 if (timeout <= 0) {
2254 dev_err(&ndev->dev, "%s: timeout\n", __func__); 2264 netdev_err(ndev, "%s: timeout\n", __func__);
2255 return -ETIMEDOUT; 2265 return -ETIMEDOUT;
2256 } 2266 }
2257 } 2267 }
@@ -2571,37 +2581,30 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2571} 2581}
2572 2582
2573/* MDIO bus release function */ 2583/* MDIO bus release function */
2574static int sh_mdio_release(struct net_device *ndev) 2584static int sh_mdio_release(struct sh_eth_private *mdp)
2575{ 2585{
2576 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2577
2578 /* unregister mdio bus */ 2586 /* unregister mdio bus */
2579 mdiobus_unregister(bus); 2587 mdiobus_unregister(mdp->mii_bus);
2580
2581 /* remove mdio bus info from net_device */
2582 dev_set_drvdata(&ndev->dev, NULL);
2583 2588
2584 /* free bitbang info */ 2589 /* free bitbang info */
2585 free_mdio_bitbang(bus); 2590 free_mdio_bitbang(mdp->mii_bus);
2586 2591
2587 return 0; 2592 return 0;
2588} 2593}
2589 2594
2590/* MDIO bus init function */ 2595/* MDIO bus init function */
2591static int sh_mdio_init(struct net_device *ndev, int id, 2596static int sh_mdio_init(struct sh_eth_private *mdp,
2592 struct sh_eth_plat_data *pd) 2597 struct sh_eth_plat_data *pd)
2593{ 2598{
2594 int ret, i; 2599 int ret, i;
2595 struct bb_info *bitbang; 2600 struct bb_info *bitbang;
2596 struct sh_eth_private *mdp = netdev_priv(ndev); 2601 struct platform_device *pdev = mdp->pdev;
2602 struct device *dev = &mdp->pdev->dev;
2597 2603
2598 /* create bit control struct for PHY */ 2604 /* create bit control struct for PHY */
2599 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info), 2605 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2600 GFP_KERNEL); 2606 if (!bitbang)
2601 if (!bitbang) { 2607 return -ENOMEM;
2602 ret = -ENOMEM;
2603 goto out;
2604 }
2605 2608
2606 /* bitbang init */ 2609 /* bitbang init */
2607 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2610 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
@@ -2614,44 +2617,42 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2614 2617
2615 /* MII controller setting */ 2618 /* MII controller setting */
2616 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2619 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2617 if (!mdp->mii_bus) { 2620 if (!mdp->mii_bus)
2618 ret = -ENOMEM; 2621 return -ENOMEM;
2619 goto out;
2620 }
2621 2622
2622 /* Hook up MII support for ethtool */ 2623 /* Hook up MII support for ethtool */
2623 mdp->mii_bus->name = "sh_mii"; 2624 mdp->mii_bus->name = "sh_mii";
2624 mdp->mii_bus->parent = &ndev->dev; 2625 mdp->mii_bus->parent = dev;
2625 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2626 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2626 mdp->pdev->name, id); 2627 pdev->name, pdev->id);
2627 2628
2628 /* PHY IRQ */ 2629 /* PHY IRQ */
2629 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev, 2630 mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
2630 sizeof(int) * PHY_MAX_ADDR,
2631 GFP_KERNEL); 2631 GFP_KERNEL);
2632 if (!mdp->mii_bus->irq) { 2632 if (!mdp->mii_bus->irq) {
2633 ret = -ENOMEM; 2633 ret = -ENOMEM;
2634 goto out_free_bus; 2634 goto out_free_bus;
2635 } 2635 }
2636 2636
2637 for (i = 0; i < PHY_MAX_ADDR; i++) 2637 /* register MDIO bus */
2638 mdp->mii_bus->irq[i] = PHY_POLL; 2638 if (dev->of_node) {
2639 if (pd->phy_irq > 0) 2639 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2640 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; 2640 } else {
2641 for (i = 0; i < PHY_MAX_ADDR; i++)
2642 mdp->mii_bus->irq[i] = PHY_POLL;
2643 if (pd->phy_irq > 0)
2644 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2645
2646 ret = mdiobus_register(mdp->mii_bus);
2647 }
2641 2648
2642 /* register mdio bus */
2643 ret = mdiobus_register(mdp->mii_bus);
2644 if (ret) 2649 if (ret)
2645 goto out_free_bus; 2650 goto out_free_bus;
2646 2651
2647 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2648
2649 return 0; 2652 return 0;
2650 2653
2651out_free_bus: 2654out_free_bus:
2652 free_mdio_bitbang(mdp->mii_bus); 2655 free_mdio_bitbang(mdp->mii_bus);
2653
2654out:
2655 return ret; 2656 return ret;
2656} 2657}
2657 2658
@@ -2676,7 +2677,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
2676 reg_offset = sh_eth_offset_fast_sh3_sh2; 2677 reg_offset = sh_eth_offset_fast_sh3_sh2;
2677 break; 2678 break;
2678 default: 2679 default:
2679 pr_err("Unknown register type (%d)\n", register_type);
2680 break; 2680 break;
2681 } 2681 }
2682 2682
@@ -2710,6 +2710,48 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2710 .ndo_change_mtu = eth_change_mtu, 2710 .ndo_change_mtu = eth_change_mtu,
2711}; 2711};
2712 2712
2713#ifdef CONFIG_OF
2714static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2715{
2716 struct device_node *np = dev->of_node;
2717 struct sh_eth_plat_data *pdata;
2718 const char *mac_addr;
2719
2720 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2721 if (!pdata)
2722 return NULL;
2723
2724 pdata->phy_interface = of_get_phy_mode(np);
2725
2726 mac_addr = of_get_mac_address(np);
2727 if (mac_addr)
2728 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2729
2730 pdata->no_ether_link =
2731 of_property_read_bool(np, "renesas,no-ether-link");
2732 pdata->ether_link_active_low =
2733 of_property_read_bool(np, "renesas,ether-link-active-low");
2734
2735 return pdata;
2736}
2737
2738static const struct of_device_id sh_eth_match_table[] = {
2739 { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2740 { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2741 { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2742 { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2743 { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2744 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2745 { }
2746};
2747MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2748#else
2749static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2750{
2751 return NULL;
2752}
2753#endif
2754
2713static int sh_eth_drv_probe(struct platform_device *pdev) 2755static int sh_eth_drv_probe(struct platform_device *pdev)
2714{ 2756{
2715 int ret, devno = 0; 2757 int ret, devno = 0;
@@ -2723,15 +2765,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2723 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2765 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2724 if (unlikely(res == NULL)) { 2766 if (unlikely(res == NULL)) {
2725 dev_err(&pdev->dev, "invalid resource\n"); 2767 dev_err(&pdev->dev, "invalid resource\n");
2726 ret = -EINVAL; 2768 return -EINVAL;
2727 goto out;
2728 } 2769 }
2729 2770
2730 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 2771 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2731 if (!ndev) { 2772 if (!ndev)
2732 ret = -ENOMEM; 2773 return -ENOMEM;
2733 goto out; 2774
2734 } 2775 pm_runtime_enable(&pdev->dev);
2776 pm_runtime_get_sync(&pdev->dev);
2735 2777
2736 /* The sh Ether-specific entries in the device structure. */ 2778 /* The sh Ether-specific entries in the device structure. */
2737 ndev->base_addr = res->start; 2779 ndev->base_addr = res->start;
@@ -2760,9 +2802,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2760 2802
2761 spin_lock_init(&mdp->lock); 2803 spin_lock_init(&mdp->lock);
2762 mdp->pdev = pdev; 2804 mdp->pdev = pdev;
2763 pm_runtime_enable(&pdev->dev);
2764 pm_runtime_resume(&pdev->dev);
2765 2805
2806 if (pdev->dev.of_node)
2807 pd = sh_eth_parse_dt(&pdev->dev);
2766 if (!pd) { 2808 if (!pd) {
2767 dev_err(&pdev->dev, "no platform data\n"); 2809 dev_err(&pdev->dev, "no platform data\n");
2768 ret = -EINVAL; 2810 ret = -EINVAL;
@@ -2778,8 +2820,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2778 mdp->ether_link_active_low = pd->ether_link_active_low; 2820 mdp->ether_link_active_low = pd->ether_link_active_low;
2779 2821
2780 /* set cpu data */ 2822 /* set cpu data */
2781 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; 2823 if (id) {
2824 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2825 } else {
2826 const struct of_device_id *match;
2827
2828 match = of_match_device(of_match_ptr(sh_eth_match_table),
2829 &pdev->dev);
2830 mdp->cd = (struct sh_eth_cpu_data *)match->data;
2831 }
2782 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); 2832 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2833 if (!mdp->reg_offset) {
2834 dev_err(&pdev->dev, "Unknown register type (%d)\n",
2835 mdp->cd->register_type);
2836 ret = -EINVAL;
2837 goto out_release;
2838 }
2783 sh_eth_set_default_cpu_data(mdp->cd); 2839 sh_eth_set_default_cpu_data(mdp->cd);
2784 2840
2785 /* set function */ 2841 /* set function */
@@ -2825,6 +2881,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2825 } 2881 }
2826 } 2882 }
2827 2883
2884 /* MDIO bus init */
2885 ret = sh_mdio_init(mdp, pd);
2886 if (ret) {
2887 dev_err(&ndev->dev, "failed to initialise MDIO\n");
2888 goto out_release;
2889 }
2890
2828 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); 2891 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2829 2892
2830 /* network device register */ 2893 /* network device register */
@@ -2832,31 +2895,26 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2832 if (ret) 2895 if (ret)
2833 goto out_napi_del; 2896 goto out_napi_del;
2834 2897
2835 /* mdio bus init */
2836 ret = sh_mdio_init(ndev, pdev->id, pd);
2837 if (ret)
2838 goto out_unregister;
2839
2840 /* print device information */ 2898 /* print device information */
2841 pr_info("Base address at 0x%x, %pM, IRQ %d.\n", 2899 netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2842 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2900 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2843 2901
2902 pm_runtime_put(&pdev->dev);
2844 platform_set_drvdata(pdev, ndev); 2903 platform_set_drvdata(pdev, ndev);
2845 2904
2846 return ret; 2905 return ret;
2847 2906
2848out_unregister:
2849 unregister_netdev(ndev);
2850
2851out_napi_del: 2907out_napi_del:
2852 netif_napi_del(&mdp->napi); 2908 netif_napi_del(&mdp->napi);
2909 sh_mdio_release(mdp);
2853 2910
2854out_release: 2911out_release:
2855 /* net_dev free */ 2912 /* net_dev free */
2856 if (ndev) 2913 if (ndev)
2857 free_netdev(ndev); 2914 free_netdev(ndev);
2858 2915
2859out: 2916 pm_runtime_put(&pdev->dev);
2917 pm_runtime_disable(&pdev->dev);
2860 return ret; 2918 return ret;
2861} 2919}
2862 2920
@@ -2865,9 +2923,9 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
2865 struct net_device *ndev = platform_get_drvdata(pdev); 2923 struct net_device *ndev = platform_get_drvdata(pdev);
2866 struct sh_eth_private *mdp = netdev_priv(ndev); 2924 struct sh_eth_private *mdp = netdev_priv(ndev);
2867 2925
2868 sh_mdio_release(ndev);
2869 unregister_netdev(ndev); 2926 unregister_netdev(ndev);
2870 netif_napi_del(&mdp->napi); 2927 netif_napi_del(&mdp->napi);
2928 sh_mdio_release(mdp);
2871 pm_runtime_disable(&pdev->dev); 2929 pm_runtime_disable(&pdev->dev);
2872 free_netdev(ndev); 2930 free_netdev(ndev);
2873 2931
@@ -2920,6 +2978,7 @@ static struct platform_driver sh_eth_driver = {
2920 .driver = { 2978 .driver = {
2921 .name = CARDNAME, 2979 .name = CARDNAME,
2922 .pm = SH_ETH_PM_OPS, 2980 .pm = SH_ETH_PM_OPS,
2981 .of_match_table = of_match_ptr(sh_eth_match_table),
2923 }, 2982 },
2924}; 2983};
2925 2984
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 6075915b88ec..d55e37cd5fec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,8 +27,7 @@
27#define RX_RING_MIN 64 27#define RX_RING_MIN 64
28#define TX_RING_MAX 1024 28#define TX_RING_MAX 1024
29#define RX_RING_MAX 1024 29#define RX_RING_MAX 1024
30#define ETHERSMALL 60 30#define PKT_BUF_SZ 1538
31#define PKT_BUF_SZ 1538
32#define SH_ETH_TSU_TIMEOUT_MS 500 31#define SH_ETH_TSU_TIMEOUT_MS 500
33#define SH_ETH_TSU_CAM_ENTRIES 32 32#define SH_ETH_TSU_CAM_ENTRIES 32
34 33
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
new file mode 100644
index 000000000000..7902341f2623
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -0,0 +1,16 @@
1#
2# Samsung Ethernet device configuration
3#
4
5config NET_VENDOR_SAMSUNG
6 bool "Samsung Ethernet device"
7 default y
8 ---help---
9 This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
10 platforms.
11
12if NET_VENDOR_SAMSUNG
13
14source "drivers/net/ethernet/samsung/sxgbe/Kconfig"
15
16endif # NET_VENDOR_SAMSUNG
diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile
new file mode 100644
index 000000000000..1773c29b8d76
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Samsung Ethernet device drivers.
3#
4
5obj-$(CONFIG_SXGBE_ETH) += sxgbe/
diff --git a/drivers/net/ethernet/samsung/sxgbe/Kconfig b/drivers/net/ethernet/samsung/sxgbe/Kconfig
new file mode 100644
index 000000000000..d79288c51d0a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Kconfig
@@ -0,0 +1,9 @@
1config SXGBE_ETH
2 tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
3 depends on HAS_IOMEM && HAS_DMA
4 select PHYLIB
5 select CRC32
6 select PTP_1588_CLOCK
7 ---help---
8 This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
9 platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
new file mode 100644
index 000000000000..dcc80b9d4370
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
2samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
3 sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \
4 sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
new file mode 100644
index 000000000000..6203c7d8550f
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -0,0 +1,535 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __SXGBE_COMMON_H__
14#define __SXGBE_COMMON_H__
15
16/* forward references */
17struct sxgbe_desc_ops;
18struct sxgbe_dma_ops;
19struct sxgbe_mtl_ops;
20
21#define SXGBE_RESOURCE_NAME "sam_sxgbeeth"
22#define DRV_MODULE_VERSION "November_2013"
23
24/* MAX HW feature words */
25#define SXGBE_HW_WORDS 3
26
27#define SXGBE_RX_COE_NONE 0
28
29/* CSR Frequency Access Defines*/
30#define SXGBE_CSR_F_150M 150000000
31#define SXGBE_CSR_F_250M 250000000
32#define SXGBE_CSR_F_300M 300000000
33#define SXGBE_CSR_F_350M 350000000
34#define SXGBE_CSR_F_400M 400000000
35#define SXGBE_CSR_F_500M 500000000
36
37/* pause time */
38#define SXGBE_PAUSE_TIME 0x200
39
40/* tx queues */
41#define SXGBE_TX_QUEUES 8
42#define SXGBE_RX_QUEUES 16
43
44/* Calculated based how much time does it take to fill 256KB Rx memory
45 * at 10Gb speed at 156MHz clock rate and considered little less then
46 * the actual value.
47 */
48#define SXGBE_MAX_DMA_RIWT 0x70
49#define SXGBE_MIN_DMA_RIWT 0x01
50
51/* Tx coalesce parameters */
52#define SXGBE_COAL_TX_TIMER 40000
53#define SXGBE_MAX_COAL_TX_TICK 100000
54#define SXGBE_TX_MAX_FRAMES 512
55#define SXGBE_TX_FRAMES 128
56
57/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */
58#define BUF_SIZE_16KiB 16384
59#define BUF_SIZE_8KiB 8192
60#define BUF_SIZE_4KiB 4096
61#define BUF_SIZE_2KiB 2048
62
63#define SXGBE_DEFAULT_LIT_LS 0x3E8
64#define SXGBE_DEFAULT_TWT_LS 0x0
65
66/* Flow Control defines */
67#define SXGBE_FLOW_OFF 0
68#define SXGBE_FLOW_RX 1
69#define SXGBE_FLOW_TX 2
70#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX)
71
72#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
73
74/* errors */
75#define RX_GMII_ERR 0x01
76#define RX_WATCHDOG_ERR 0x02
77#define RX_CRC_ERR 0x03
78#define RX_GAINT_ERR 0x04
79#define RX_IP_HDR_ERR 0x05
80#define RX_PAYLOAD_ERR 0x06
81#define RX_OVERFLOW_ERR 0x07
82
83/* pkt type */
84#define RX_LEN_PKT 0x00
85#define RX_MACCTL_PKT 0x01
86#define RX_DCBCTL_PKT 0x02
87#define RX_ARP_PKT 0x03
88#define RX_OAM_PKT 0x04
89#define RX_UNTAG_PKT 0x05
90#define RX_OTHER_PKT 0x07
91#define RX_SVLAN_PKT 0x08
92#define RX_CVLAN_PKT 0x09
93#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A
94#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B
95#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C
96#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D
97
98#define RX_NOT_IP_PKT 0x00
99#define RX_IPV4_TCP_PKT 0x01
100#define RX_IPV4_UDP_PKT 0x02
101#define RX_IPV4_ICMP_PKT 0x03
102#define RX_IPV4_UNKNOWN_PKT 0x07
103#define RX_IPV6_TCP_PKT 0x09
104#define RX_IPV6_UDP_PKT 0x0A
105#define RX_IPV6_ICMP_PKT 0x0B
106#define RX_IPV6_UNKNOWN_PKT 0x0F
107
108#define RX_NO_PTP 0x00
109#define RX_PTP_SYNC 0x01
110#define RX_PTP_FOLLOW_UP 0x02
111#define RX_PTP_DELAY_REQ 0x03
112#define RX_PTP_DELAY_RESP 0x04
113#define RX_PTP_PDELAY_REQ 0x05
114#define RX_PTP_PDELAY_RESP 0x06
115#define RX_PTP_PDELAY_FOLLOW_UP 0x07
116#define RX_PTP_ANNOUNCE 0x08
117#define RX_PTP_MGMT 0x09
118#define RX_PTP_SIGNAL 0x0A
119#define RX_PTP_RESV_MSG 0x0F
120
121/* EEE-LPI mode flags*/
122#define TX_ENTRY_LPI_MODE 0x10
123#define TX_EXIT_LPI_MODE 0x20
124#define RX_ENTRY_LPI_MODE 0x40
125#define RX_EXIT_LPI_MODE 0x80
126
127/* EEE-LPI Interrupt status flag */
128#define LPI_INT_STATUS BIT(5)
129
130/* EEE-LPI Default timer values */
131#define LPI_LINK_STATUS_TIMER 0x3E8
132#define LPI_MAC_WAIT_TIMER 0x00
133
134/* EEE-LPI Control and status definitions */
135#define LPI_CTRL_STATUS_TXA BIT(19)
136#define LPI_CTRL_STATUS_PLSDIS BIT(18)
137#define LPI_CTRL_STATUS_PLS BIT(17)
138#define LPI_CTRL_STATUS_LPIEN BIT(16)
139#define LPI_CTRL_STATUS_TXRSTP BIT(11)
140#define LPI_CTRL_STATUS_RXRSTP BIT(10)
141#define LPI_CTRL_STATUS_RLPIST BIT(9)
142#define LPI_CTRL_STATUS_TLPIST BIT(8)
143#define LPI_CTRL_STATUS_RLPIEX BIT(3)
144#define LPI_CTRL_STATUS_RLPIEN BIT(2)
145#define LPI_CTRL_STATUS_TLPIEX BIT(1)
146#define LPI_CTRL_STATUS_TLPIEN BIT(0)
147
148enum dma_irq_status {
149 tx_hard_error = BIT(0),
150 tx_bump_tc = BIT(1),
151 handle_tx = BIT(2),
152 rx_hard_error = BIT(3),
153 rx_bump_tc = BIT(4),
154 handle_rx = BIT(5),
155};
156
157#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \
158 NETIF_F_HW_VLAN_STAG_RX | \
159 NETIF_F_HW_VLAN_CTAG_TX | \
160 NETIF_F_HW_VLAN_STAG_TX | \
161 NETIF_F_HW_VLAN_CTAG_FILTER | \
162 NETIF_F_HW_VLAN_STAG_FILTER)
163
164/* MMC control defines */
165#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008
166
167/* SXGBE HW ADDR regs */
168#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
169 (reg * 8))
170#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
171 (reg * 8))
172#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */
173#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */
174
175/* SXGBE Frame Filter defines */
176#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
177#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
178#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
179#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
180#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
181#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
182#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
183#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
184#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
185#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
186
187#define SXGBE_HASH_TABLE_SIZE 64
188#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
189#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
190
191#define SXGBE_HI_REG_AE 0x80000000
192
193/* Minimum and maximum MTU */
194#define MIN_MTU 68
195#define MAX_MTU 9000
196
197#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
198 for (queue_num = 0; queue_num < max_queues; queue_num++)
199
200#define DRV_VERSION "1.0.0"
201
202#define SXGBE_MAX_RX_CHANNELS 16
203#define SXGBE_MAX_TX_CHANNELS 16
204
205#define START_MAC_REG_OFFSET 0x0000
206#define MAX_MAC_REG_OFFSET 0x0DFC
207#define START_MTL_REG_OFFSET 0x1000
208#define MAX_MTL_REG_OFFSET 0x18FC
209#define START_DMA_REG_OFFSET 0x3000
210#define MAX_DMA_REG_OFFSET 0x38FC
211
212#define REG_SPACE_SIZE 0x2000
213
/* sxgbe statistics counters
 *
 * All counters are software-maintained (bumped from IRQ/NAPI paths) and
 * exposed through ethtool.  Field names are part of the ethtool strings
 * table elsewhere in the driver, so misspelled names below (e.g.
 * "gaint" for giant, "untag_okt" for untag_pkt, "dvan_" for "dvlan_")
 * cannot be renamed here without touching that table too.
 */
struct sxgbe_extra_stats {
	/* TX/RX IRQ events */
	unsigned long tx_underflow_irq;
	unsigned long tx_process_stopped_irq;
	unsigned long tx_ctxt_desc_err;
	unsigned long tx_threshold;
	unsigned long rx_threshold;
	unsigned long tx_pkt_n;
	unsigned long rx_pkt_n;
	unsigned long normal_irq_n;
	unsigned long tx_normal_irq_n;
	unsigned long rx_normal_irq_n;
	unsigned long napi_poll;
	unsigned long tx_clean;
	unsigned long tx_reset_ic_bit;
	unsigned long rx_process_stopped_irq;
	unsigned long rx_underflow_irq;

	/* Bus access errors */
	unsigned long fatal_bus_error_irq;
	unsigned long tx_read_transfer_err;
	unsigned long tx_write_transfer_err;
	unsigned long tx_desc_access_err;
	unsigned long tx_buffer_access_err;
	unsigned long tx_data_transfer_err;
	unsigned long rx_read_transfer_err;
	unsigned long rx_write_transfer_err;
	unsigned long rx_desc_access_err;
	unsigned long rx_buffer_access_err;
	unsigned long rx_data_transfer_err;

	/* EEE-LPI stats */
	unsigned long tx_lpi_entry_n;
	unsigned long tx_lpi_exit_n;
	unsigned long rx_lpi_entry_n;
	unsigned long rx_lpi_exit_n;
	unsigned long eee_wakeup_error_n;

	/* RX specific */
	/* L2 error */
	unsigned long rx_code_gmii_err;
	unsigned long rx_watchdog_err;
	unsigned long rx_crc_err;
	unsigned long rx_gaint_pkt_err;	/* sic: giant-packet errors */
	unsigned long ip_hdr_err;
	unsigned long ip_payload_err;
	unsigned long overflow_error;

	/* L2 Pkt type */
	unsigned long len_pkt;
	unsigned long mac_ctl_pkt;
	unsigned long dcb_ctl_pkt;
	unsigned long arp_pkt;
	unsigned long oam_pkt;
	unsigned long untag_okt;	/* sic: untagged packets */
	unsigned long other_pkt;
	unsigned long svlan_tag_pkt;
	unsigned long cvlan_tag_pkt;
	unsigned long dvlan_ocvlan_icvlan_pkt;
	unsigned long dvlan_osvlan_isvlan_pkt;
	unsigned long dvlan_osvlan_icvlan_pkt;
	unsigned long dvan_ocvlan_icvlan_pkt; /* sic: the outer-CVLAN/inner-SVLAN counter */

	/* L3/L4 Pkt type */
	unsigned long not_ip_pkt;
	unsigned long ip4_tcp_pkt;
	unsigned long ip4_udp_pkt;
	unsigned long ip4_icmp_pkt;
	unsigned long ip4_unknown_pkt;
	unsigned long ip6_tcp_pkt;
	unsigned long ip6_udp_pkt;
	unsigned long ip6_icmp_pkt;
	unsigned long ip6_unknown_pkt;

	/* Filter specific */
	unsigned long vlan_filter_match;
	unsigned long sa_filter_fail;
	unsigned long da_filter_fail;
	unsigned long hash_filter_pass;
	unsigned long l3_filter_match;
	unsigned long l4_filter_match;

	/* RX context specific (PTP message classification) */
	unsigned long timestamp_dropped;
	unsigned long rx_msg_type_no_ptp;
	unsigned long rx_ptp_type_sync;
	unsigned long rx_ptp_type_follow_up;
	unsigned long rx_ptp_type_delay_req;
	unsigned long rx_ptp_type_delay_resp;
	unsigned long rx_ptp_type_pdelay_req;
	unsigned long rx_ptp_type_pdelay_resp;
	unsigned long rx_ptp_type_pdelay_follow_up;
	unsigned long rx_ptp_announce;
	unsigned long rx_ptp_mgmt;
	unsigned long rx_ptp_signal;
	unsigned long rx_ptp_resv_msg_type;
};
312
/* Current PHY link parameters as tracked by the driver */
struct mac_link {
	int port;
	int duplex;
	int speed;
};
318
/* Offsets of the MII management registers within the MAC register space */
struct mii_regs {
	unsigned int addr;		/* MII Address */
	unsigned int data;		/* MII Data */
};
323
/* Hardware-abstraction callbacks for the MAC core block.  All ops take the
 * ioremapped register base; none of them sleep.
 */
struct sxgbe_core_ops {
	/* MAC core initialization */
	void (*core_init)(void __iomem *ioaddr);
	/* Dump MAC registers */
	void (*dump_regs)(void __iomem *ioaddr);
	/* Handle extra events on specific interrupts hw dependent;
	 * returns a bitmask of TX/RX LPI entry/exit events.
	 */
	int (*host_irq_status)(void __iomem *ioaddr,
			       struct sxgbe_extra_stats *x);
	/* Set power management mode (e.g. magic frame) */
	void (*pmt)(void __iomem *ioaddr, unsigned long mode);
	/* Set/Get Unicast MAC addresses; reg_n selects the address slot */
	void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
			      unsigned int reg_n);
	void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
			      unsigned int reg_n);
	/* Gate the MAC receiver / transmitter on or off */
	void (*enable_rx)(void __iomem *ioaddr, bool enable);
	void (*enable_tx)(void __iomem *ioaddr, bool enable);

	/* controller version specific operations */
	int (*get_controller_version)(void __iomem *ioaddr);

	/* If supported then get the optional core features;
	 * feature_index selects one of the HW-feature capability words.
	 */
	unsigned int (*get_hw_feature)(void __iomem *ioaddr,
				       unsigned char feature_index);
	/* adjust SXGBE speed */
	void (*set_speed)(void __iomem *ioaddr, unsigned char speed);

	/* EEE-LPI specific operations */
	void (*set_eee_mode)(void __iomem *ioaddr);
	void (*reset_eee_mode)(void __iomem *ioaddr);
	void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
			      const int tw);
	void (*set_eee_pls)(void __iomem *ioaddr, const int link);

	/* Enable disable checksum offload operations */
	void (*enable_rx_csum)(void __iomem *ioaddr);
	void (*disable_rx_csum)(void __iomem *ioaddr);
};
362
363const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
364
/* Top-level HW-abstraction bundle: one ops table per hardware sub-block
 * plus cached link/MII state and controller identification.
 */
struct sxgbe_ops {
	const struct sxgbe_core_ops *mac;
	const struct sxgbe_desc_ops *desc;
	const struct sxgbe_dma_ops *dma;
	const struct sxgbe_mtl_ops *mtl;
	struct mii_regs mii;	/* MII register Addresses */
	struct mac_link link;
	unsigned int ctrl_uid;	/* controller user/version id words */
	unsigned int ctrl_id;
};
375
/* SXGBE private data structures */

/* Per-TX-queue state: descriptor ring, per-slot skb/DMA bookkeeping and
 * TX-coalescing settings.  One instance per hardware TX channel.
 */
struct sxgbe_tx_queue {
	unsigned int irq_no;
	struct sxgbe_priv_data *priv_ptr;	/* back-pointer to owner */
	struct sxgbe_tx_norm_desc *dma_tx;	/* descriptor ring (CPU view) */
	dma_addr_t dma_tx_phy;			/* ring base, device view */
	dma_addr_t *tx_skbuff_dma;		/* per-slot mapped buf addresses */
	struct sk_buff **tx_skbuff;		/* per-slot in-flight skbs */
	struct timer_list txtimer;		/* TX coalescing timer */
	spinlock_t tx_lock;	/* lock for tx queues */
	unsigned int cur_tx;			/* next slot to fill */
	unsigned int dirty_tx;			/* next slot to reclaim */
	u32 tx_count_frames;
	u32 tx_coal_frames;
	u32 tx_coal_timer;
	int hwts_tx_en;				/* HW TX timestamping enabled */
	u16 prev_mss;				/* last MSS written to a ctxt desc */
	u8 queue_no;
};
395
/* Per-RX-queue state: descriptor ring plus per-slot skb/DMA bookkeeping.
 * One instance per hardware RX channel.
 */
struct sxgbe_rx_queue {
	struct sxgbe_priv_data *priv_ptr;	/* back-pointer to owner */
	struct sxgbe_rx_norm_desc *dma_rx;	/* descriptor ring (CPU view) */
	struct sk_buff **rx_skbuff;		/* per-slot receive skbs */
	unsigned int cur_rx;			/* next slot HW will fill */
	unsigned int dirty_rx;			/* next slot to refill */
	unsigned int irq_no;
	u32 rx_riwt;				/* RX interrupt watchdog timer */
	dma_addr_t *rx_skbuff_dma;		/* per-slot mapped buf addresses */
	dma_addr_t dma_rx_phy;			/* ring base, device view */
	u8 queue_no;
};
408
/* SXGBE HW capabilities, decoded from the three hardware-feature
 * capability words read via get_hw_feature() (CAP[0..2]).
 */
struct sxgbe_hw_features {
	/****** CAP [0] *******/
	unsigned int pmt_remote_wake_up;
	unsigned int pmt_magic_frame;
	/* IEEE 1588-2008 */
	unsigned int atime_stamp;

	unsigned int eee;

	unsigned int tx_csum_offload;
	unsigned int rx_csum_offload;
	unsigned int multi_macaddr;
	unsigned int tstamp_srcselect;
	unsigned int sa_vlan_insert;

	/****** CAP [1] *******/
	unsigned int rxfifo_size;
	unsigned int txfifo_size;
	unsigned int atstmap_hword;
	unsigned int dcb_enable;
	unsigned int splithead_enable;
	unsigned int tcpseg_offload;
	unsigned int debug_mem;
	unsigned int rss_enable;
	unsigned int hash_tsize;
	unsigned int l3l4_filer_size;	/* sic: filter */

	/* This value is in bytes and
	 * as mentioned in HW features
	 * of SXGBE data book
	 */
	unsigned int rx_mtl_qsize;
	unsigned int tx_mtl_qsize;

	/****** CAP [2] *******/
	/* TX and RX number of channels */
	unsigned int rx_mtl_queues;
	unsigned int tx_mtl_queues;
	unsigned int rx_dma_channels;
	unsigned int tx_dma_channels;
	unsigned int pps_output_count;
	unsigned int aux_input_count;
};
453
/* Driver-private state, one instance per sxgbe network device
 * (netdev_priv(ndev)).
 */
struct sxgbe_priv_data {
	/* DMA descriptors: one TX and one RX queue per hardware channel */
	struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
	struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
	u8 cur_rx_qnum;			/* queue currently being polled */

	unsigned int dma_tx_size;	/* ring sizes in descriptors */
	unsigned int dma_rx_size;
	unsigned int dma_buf_sz;	/* RX buffer size in bytes */
	u32 rx_riwt;			/* RX interrupt watchdog value */

	struct napi_struct napi;

	void __iomem *ioaddr;		/* register base */
	struct net_device *dev;
	struct device *device;
	struct sxgbe_ops *hw;		/* sxgbe specific ops */
	int no_csum_insertion;
	int irq;
	int rxcsum_insertion;
	spinlock_t stats_lock;	/* lock for tx/rx statistics */

	/* PHY / MDIO state */
	struct phy_device *phydev;
	int oldlink;
	int speed;
	int oldduplex;
	struct mii_bus *mii;
	int mii_irq[PHY_MAX_ADDR];
	u8 rx_pause;			/* flow-control pause settings */
	u8 tx_pause;

	struct sxgbe_extra_stats xstats;
	struct sxgbe_plat_data *plat;
	struct sxgbe_hw_features hw_cap;

	u32 msg_enable;			/* netif message level bitmap */

	struct clk *sxgbe_clk;
	int clk_csr;
	unsigned int mode;
	unsigned int default_addend;	/* PTP clock addend */

	/* advanced time stamp support */
	u32 adv_ts;
	int use_riwt;
	struct ptp_clock *ptp_clock;

	/* tc control */
	int tx_tc;
	int rx_tc;
	/* EEE-LPI specific members */
	struct timer_list eee_ctrl_timer;
	bool tx_path_in_lpi_mode;
	int lpi_irq;
	int eee_enabled;
	int eee_active;
	int tx_lpi_timer;
};
512
513/* Function prototypes */
514struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
515 struct sxgbe_plat_data *plat_dat,
516 void __iomem *addr);
517int sxgbe_drv_remove(struct net_device *ndev);
518void sxgbe_set_ethtool_ops(struct net_device *netdev);
519int sxgbe_mdio_unregister(struct net_device *ndev);
520int sxgbe_mdio_register(struct net_device *ndev);
521int sxgbe_register_platform(void);
522void sxgbe_unregister_platform(void);
523
524#ifdef CONFIG_PM
525int sxgbe_suspend(struct net_device *ndev);
526int sxgbe_resume(struct net_device *ndev);
527int sxgbe_freeze(struct net_device *ndev);
528int sxgbe_restore(struct net_device *ndev);
529#endif /* CONFIG_PM */
530
531const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
532
533void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv);
534bool sxgbe_eee_init(struct sxgbe_priv_data * const priv);
535#endif /* __SXGBE_COMMON_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
new file mode 100644
index 000000000000..c4da7a2b002a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -0,0 +1,262 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/export.h>
16#include <linux/io.h>
17#include <linux/netdevice.h>
18#include <linux/phy.h>
19
20#include "sxgbe_common.h"
21#include "sxgbe_reg.h"
22
23/* MAC core initialization */
24static void sxgbe_core_init(void __iomem *ioaddr)
25{
26 u32 regval;
27
28 /* TX configuration */
29 regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
30 /* Other configurable parameters IFP, IPG, ISR, ISM
31 * needs to be set if needed
32 */
33 regval |= SXGBE_TX_JABBER_DISABLE;
34 writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
35
36 /* RX configuration */
37 regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
38 /* Other configurable parameters CST, SPEN, USP, GPSLCE
39 * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN needs to be
40 * set if needed
41 */
42 regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
43 writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
44}
45
/* Dump MAC registers — intentionally empty stub; the ethtool register
 * dump is handled elsewhere in the driver.
 */
static void sxgbe_core_dump_regs(void __iomem *ioaddr)
{
}
50
51static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status)
52{
53 int status = 0;
54 int lpi_status;
55
56 /* Reading this register shall clear all the LPI status bits */
57 lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
58
59 if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
60 status |= TX_ENTRY_LPI_MODE;
61 if (lpi_status & LPI_CTRL_STATUS_TLPIEX)
62 status |= TX_EXIT_LPI_MODE;
63 if (lpi_status & LPI_CTRL_STATUS_RLPIEN)
64 status |= RX_ENTRY_LPI_MODE;
65 if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
66 status |= RX_EXIT_LPI_MODE;
67
68 return status;
69}
70
71/* Handle extra events on specific interrupts hw dependent */
72static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
73 struct sxgbe_extra_stats *x)
74{
75 int irq_status, status = 0;
76
77 irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG);
78
79 if (unlikely(irq_status & LPI_INT_STATUS))
80 status |= sxgbe_get_lpi_status(ioaddr, irq_status);
81
82 return status;
83}
84
/* Set power management mode (e.g. magic frame) — not implemented yet;
 * intentionally an empty stub.
 */
static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
{
}
89
90/* Set/Get Unicast MAC addresses */
91static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
92 unsigned int reg_n)
93{
94 u32 high_word, low_word;
95
96 high_word = (addr[5] << 8) | (addr[4]);
97 low_word = (addr[3] << 24) | (addr[2] << 16) |
98 (addr[1] << 8) | (addr[0]);
99 writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
100 writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
101}
102
103static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
104 unsigned int reg_n)
105{
106 u32 high_word, low_word;
107
108 high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
109 low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
110
111 /* extract and assign address */
112 addr[5] = (high_word & 0x0000FF00) >> 8;
113 addr[4] = (high_word & 0x000000FF);
114 addr[3] = (low_word & 0xFF000000) >> 24;
115 addr[2] = (low_word & 0x00FF0000) >> 16;
116 addr[1] = (low_word & 0x0000FF00) >> 8;
117 addr[0] = (low_word & 0x000000FF);
118}
119
120static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
121{
122 u32 tx_config;
123
124 tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
125 tx_config &= ~SXGBE_TX_ENABLE;
126
127 if (enable)
128 tx_config |= SXGBE_TX_ENABLE;
129 writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
130}
131
132static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
133{
134 u32 rx_config;
135
136 rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
137 rx_config &= ~SXGBE_RX_ENABLE;
138
139 if (enable)
140 rx_config |= SXGBE_RX_ENABLE;
141 writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
142}
143
/* Return the raw controller version register value */
static int sxgbe_get_controller_version(void __iomem *ioaddr)
{
	return readl(ioaddr + SXGBE_CORE_VERSION_REG);
}
148
/* If supported then get the optional core features: returns the raw
 * hardware-feature capability word selected by feature_index (0..2);
 * decoded by the caller into struct sxgbe_hw_features.
 */
static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
					 unsigned char feature_index)
{
	return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
}
155
156static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
157{
158 u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
159
160 /* clear the speed bits */
161 tx_cfg &= ~0x60000000;
162 tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);
163
164 /* set the speed */
165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
166}
167
168static void sxgbe_set_eee_mode(void __iomem *ioaddr)
169{
170 u32 ctrl;
171
172 /* Enable the LPI mode for transmit path with Tx automate bit set.
173 * When Tx Automate bit is set, MAC internally handles the entry
174 * to LPI mode after all outstanding and pending packets are
175 * transmitted.
176 */
177 ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
178 ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA;
179 writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
180}
181
182static void sxgbe_reset_eee_mode(void __iomem *ioaddr)
183{
184 u32 ctrl;
185
186 ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
187 ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA);
188 writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
189}
190
191static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link)
192{
193 u32 ctrl;
194
195 ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
196
197 /* If the PHY link status is UP then set PLS */
198 if (link)
199 ctrl |= LPI_CTRL_STATUS_PLS;
200 else
201 ctrl &= ~LPI_CTRL_STATUS_PLS;
202
203 writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
204}
205
206static void sxgbe_set_eee_timer(void __iomem *ioaddr,
207 const int ls, const int tw)
208{
209 int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
210
211 /* Program the timers in the LPI timer control register:
212 * LS: minimum time (ms) for which the link
213 * status from PHY should be ok before transmitting
214 * the LPI pattern.
215 * TW: minimum time (us) for which the core waits
216 * after it has stopped transmitting the LPI pattern.
217 */
218 writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
219}
220
221static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
222{
223 u32 ctrl;
224
225 ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
226 ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
227 writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
228}
229
230static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
231{
232 u32 ctrl;
233
234 ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
235 ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
236 writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
237}
238
/* MAC-core ops table exported through sxgbe_get_core_ops() */
static const struct sxgbe_core_ops core_ops = {
	.core_init		= sxgbe_core_init,
	.dump_regs		= sxgbe_core_dump_regs,
	.host_irq_status	= sxgbe_core_host_irq_status,
	.pmt			= sxgbe_core_pmt,
	.set_umac_addr		= sxgbe_core_set_umac_addr,
	.get_umac_addr		= sxgbe_core_get_umac_addr,
	.enable_rx		= sxgbe_enable_rx,
	.enable_tx		= sxgbe_enable_tx,
	.get_controller_version	= sxgbe_get_controller_version,
	.get_hw_feature		= sxgbe_get_hw_feature,
	.set_speed		= sxgbe_core_set_speed,
	.set_eee_mode		= sxgbe_set_eee_mode,
	.reset_eee_mode		= sxgbe_reset_eee_mode,
	.set_eee_timer		= sxgbe_set_eee_timer,
	.set_eee_pls		= sxgbe_set_eee_pls,
	.enable_rx_csum		= sxgbe_enable_rx_csum,
	.disable_rx_csum	= sxgbe_disable_rx_csum,
};
258
/* Accessor for the (singleton, immutable) MAC-core ops table */
const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
{
	return &core_ops;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
new file mode 100644
index 000000000000..e896dbbd2e15
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -0,0 +1,515 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/bitops.h>
16#include <linux/export.h>
17#include <linux/io.h>
18#include <linux/netdevice.h>
19#include <linux/phy.h>
20
21#include "sxgbe_common.h"
22#include "sxgbe_dma.h"
23#include "sxgbe_desc.h"
24
/* DMA TX descriptor ring initialization: clear the OWN bit so the
 * descriptor starts out owned by software.
 */
static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
{
	p->tdes23.tx_rd_des23.own_bit = 0;
}
30
/* Program the TSO (TSE) fields of a TX read-format descriptor: the full
 * header length goes in buf1_size, the TCP header length is expressed in
 * 32-bit words, and the payload length in the packet-length union.
 */
static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
				     u32 total_hdr_len, u32 tcp_hdr_len,
				     u32 tcp_payload_len)
{
	p->tdes23.tx_rd_des23.tse_bit = is_tse;
	p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
	p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4; /* in words */
	p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len;
}
40
/* Assign buffer lengths for descriptor.  A non-zero cksum requests full
 * checksum insertion (IP header plus TCP/UDP pseudo-header, cic_full).
 */
static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
				  int buf1_len, int pkt_len, int cksum)
{
	p->tdes23.tx_rd_des23.first_desc = is_fd;
	p->tdes23.tx_rd_des23.buf1_size = buf1_len;

	p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;

	if (cksum)
		p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
}
53
/* Set VLAN control information (VLAN tag-control field) */
static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
{
	p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
}
59
/* Set the owner of Normal descriptor: hand it to the DMA engine */
static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
{
	p->tdes23.tx_rd_des23.own_bit = 1;
}
65
/* Get the owner of Normal descriptor: non-zero while DMA still owns it */
static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
{
	return p->tdes23.tx_rd_des23.own_bit;
}
71
/* Invoked by the xmit function to close the tx descriptor: mark it as
 * the last segment of the frame and request a completion interrupt.
 */
static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
{
	p->tdes23.tx_rd_des23.last_desc = 1;
	p->tdes23.tx_rd_des23.int_on_com = 1;
}
78
/* Clean the tx descriptor as soon as the tx irq is received: zero every
 * field so the slot can be reused.
 */
static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
{
	memset(p, 0, sizeof(*p));
}
84
/* Clear interrupt on tx frame completion. When this bit is
 * set an interrupt happens as soon as the frame is transmitted;
 * clearing it lets TX completions be coalesced.
 */
static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
{
	p->tdes23.tx_rd_des23.int_on_com = 0;
}
92
/* Last tx segment reports the transmit status: non-zero for the
 * descriptor carrying the final segment of a frame.
 */
static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
{
	return p->tdes23.tx_rd_des23.last_desc;
}
98
/* Get the buffer size from the descriptor (buffer-1 length field) */
static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
{
	return p->tdes23.tx_rd_des23.buf1_size;
}
104
/* Set tx timestamp enable bit: request a HW timestamp for this frame */
static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
{
	p->tdes23.tx_rd_des23.timestmp_enable = 1;
}
110
/* get tx timestamp status
 * NOTE(review): this returns the timestmp_enable *request* bit of the
 * read-format descriptor, not a write-back timestamp-captured status —
 * confirm this is the intended semantics against the databook.
 */
static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
{
	return p->tdes23.tx_rd_des23.timestmp_enable;
}
116
/* TX Context Descriptor specific: mark this descriptor as a context
 * descriptor (CTXT bit) rather than a normal transmit descriptor.
 */
static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
{
	p->ctxt_bit = 1;
}
122
/* Set the owner of TX context descriptor: hand it to the DMA engine */
static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
{
	p->own_bit = 1;
}
128
/* Get the owner of TX context descriptor */
static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
{
	return p->own_bit;
}
134
/* Set TX mss in TX context Descriptor (maximum segment size for TSO) */
static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss)
{
	p->maxseg_size = mss;
}
140
/* Get TX mss from TX context Descriptor */
static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
{
	return p->maxseg_size;
}
146
/* Set TX tcmssv in TX context Descriptor (marks the MSS field valid) */
static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
{
	p->tcmssv = 1;
}
152
/* Reset TX ostc in TX context Descriptor (one-step timestamp control) */
static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
{
	p->ostc = 0;
}
158
/* Set IVLAN information: inner VLAN tag, its control field and the
 * valid flag — only written when is_ivlanvalid is set.
 */
static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
					    int is_ivlanvalid, int ivlan_tag,
					    int ivlan_ctl)
{
	if (is_ivlanvalid) {
		p->ivlan_tag_valid = is_ivlanvalid;
		p->ivlan_tag = ivlan_tag;
		p->ivlan_tag_ctl = ivlan_ctl;
	}
}
170
/* Return IVLAN Tag (inner VLAN tag) */
static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
{
	return p->ivlan_tag;
}
176
/* Set VLAN Tag and its valid flag — only written when is_vlanvalid */
static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
					   int is_vlanvalid, int vlan_tag)
{
	if (is_vlanvalid) {
		p->vltag_valid = is_vlanvalid;
		p->vlan_tag = vlan_tag;
	}
}
186
/* Return VLAN Tag */
static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
{
	return p->vlan_tag;
}
192
/* Set Time stamp: when one-step timestamping is enabled, split the
 * 64-bit timestamp into the descriptor's low/high 32-bit halves.
 */
static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
					  u8 ostc_enable, u64 tstamp)
{
	if (ostc_enable) {
		p->ostc = ostc_enable;
		p->tstamp_lo = (u32) tstamp;
		p->tstamp_hi = (u32) (tstamp>>32);
	}
}
/* Close TX context descriptor: hand it to the DMA engine (OWN bit) */
static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
{
	p->own_bit = 1;
}
208
/* WB status of context descriptor: context-descriptor error flag */
static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
{
	return p->ctxt_desc_err;
}
214
/* DMA RX descriptor ring initialization: give the descriptor to the DMA
 * engine.  mode and end are currently unused.
 * NOTE(review): int_on_com is *set* when disable_rx_ic is non-zero,
 * which reads as inverted polarity (enabling the completion interrupt
 * when the caller asked to disable it) — confirm the bit's meaning in
 * the databook before changing.
 */
static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
			       int mode, int end)
{
	p->rdes23.rx_rd_des23.own_bit = 1;
	if (disable_rx_ic)
		p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
}
223
/* Get RX own bit: non-zero while the DMA engine still owns the slot */
static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
{
	return p->rdes23.rx_rd_des23.own_bit;
}
229
/* Set RX own bit: return the descriptor to the DMA engine for refill */
static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
{
	p->rdes23.rx_rd_des23.own_bit = 1;
}
235
/* Get the receive frame size from the write-back descriptor */
static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
{
	return p->rdes23.rx_wb_des23.pkt_len;
}
241
/* Return first Descriptor status (first segment of the frame) */
static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
{
	return p->rdes23.rx_wb_des23.first_desc;
}
247
/* Return Last Descriptor status (last segment of the frame) */
static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
{
	return p->rdes23.rx_wb_des23.last_desc;
}
253
254
255/* Return the RX status looking at the WB fields */
256static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
257 struct sxgbe_extra_stats *x, int *checksum)
258{
259 int status = 0;
260
261 *checksum = CHECKSUM_UNNECESSARY;
262 if (p->rdes23.rx_wb_des23.err_summary) {
263 switch (p->rdes23.rx_wb_des23.err_l2_type) {
264 case RX_GMII_ERR:
265 status = -EINVAL;
266 x->rx_code_gmii_err++;
267 break;
268 case RX_WATCHDOG_ERR:
269 status = -EINVAL;
270 x->rx_watchdog_err++;
271 break;
272 case RX_CRC_ERR:
273 status = -EINVAL;
274 x->rx_crc_err++;
275 break;
276 case RX_GAINT_ERR:
277 status = -EINVAL;
278 x->rx_gaint_pkt_err++;
279 break;
280 case RX_IP_HDR_ERR:
281 *checksum = CHECKSUM_NONE;
282 x->ip_hdr_err++;
283 break;
284 case RX_PAYLOAD_ERR:
285 *checksum = CHECKSUM_NONE;
286 x->ip_payload_err++;
287 break;
288 case RX_OVERFLOW_ERR:
289 status = -EINVAL;
290 x->overflow_error++;
291 break;
292 default:
293 pr_err("Invalid Error type\n");
294 break;
295 }
296 } else {
297 switch (p->rdes23.rx_wb_des23.err_l2_type) {
298 case RX_LEN_PKT:
299 x->len_pkt++;
300 break;
301 case RX_MACCTL_PKT:
302 x->mac_ctl_pkt++;
303 break;
304 case RX_DCBCTL_PKT:
305 x->dcb_ctl_pkt++;
306 break;
307 case RX_ARP_PKT:
308 x->arp_pkt++;
309 break;
310 case RX_OAM_PKT:
311 x->oam_pkt++;
312 break;
313 case RX_UNTAG_PKT:
314 x->untag_okt++;
315 break;
316 case RX_OTHER_PKT:
317 x->other_pkt++;
318 break;
319 case RX_SVLAN_PKT:
320 x->svlan_tag_pkt++;
321 break;
322 case RX_CVLAN_PKT:
323 x->cvlan_tag_pkt++;
324 break;
325 case RX_DVLAN_OCVLAN_ICVLAN_PKT:
326 x->dvlan_ocvlan_icvlan_pkt++;
327 break;
328 case RX_DVLAN_OSVLAN_ISVLAN_PKT:
329 x->dvlan_osvlan_isvlan_pkt++;
330 break;
331 case RX_DVLAN_OSVLAN_ICVLAN_PKT:
332 x->dvlan_osvlan_icvlan_pkt++;
333 break;
334 case RX_DVLAN_OCVLAN_ISVLAN_PKT:
335 x->dvlan_ocvlan_icvlan_pkt++;
336 break;
337 default:
338 pr_err("Invalid L2 Packet type\n");
339 break;
340 }
341 }
342
343 /* L3/L4 Pkt type */
344 switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
345 case RX_NOT_IP_PKT:
346 x->not_ip_pkt++;
347 break;
348 case RX_IPV4_TCP_PKT:
349 x->ip4_tcp_pkt++;
350 break;
351 case RX_IPV4_UDP_PKT:
352 x->ip4_udp_pkt++;
353 break;
354 case RX_IPV4_ICMP_PKT:
355 x->ip4_icmp_pkt++;
356 break;
357 case RX_IPV4_UNKNOWN_PKT:
358 x->ip4_unknown_pkt++;
359 break;
360 case RX_IPV6_TCP_PKT:
361 x->ip6_tcp_pkt++;
362 break;
363 case RX_IPV6_UDP_PKT:
364 x->ip6_udp_pkt++;
365 break;
366 case RX_IPV6_ICMP_PKT:
367 x->ip6_icmp_pkt++;
368 break;
369 case RX_IPV6_UNKNOWN_PKT:
370 x->ip6_unknown_pkt++;
371 break;
372 default:
373 pr_err("Invalid L3/L4 Packet type\n");
374 break;
375 }
376
377 /* Filter */
378 if (p->rdes23.rx_wb_des23.vlan_filter_match)
379 x->vlan_filter_match++;
380
381 if (p->rdes23.rx_wb_des23.sa_filter_fail) {
382 status = -EINVAL;
383 x->sa_filter_fail++;
384 }
385 if (p->rdes23.rx_wb_des23.da_filter_fail) {
386 status = -EINVAL;
387 x->da_filter_fail++;
388 }
389 if (p->rdes23.rx_wb_des23.hash_filter_pass)
390 x->hash_filter_pass++;
391
392 if (p->rdes23.rx_wb_des23.l3_filter_match)
393 x->l3_filter_match++;
394
395 if (p->rdes23.rx_wb_des23.l4_filter_match)
396 x->l4_filter_match++;
397
398 return status;
399}
400
/* Get own bit of context descriptor */
static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
{
	return p->own_bit;
}
406
/* Set own bit for context descriptor: hand it back to the DMA engine */
static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
{
	p->own_bit = 1;
}
412
413
414/* Return the reception status looking at Context control information */
415static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
416 struct sxgbe_extra_stats *x)
417{
418 if (p->tstamp_dropped)
419 x->timestamp_dropped++;
420
421 /* ptp */
422 if (p->ptp_msgtype == RX_NO_PTP)
423 x->rx_msg_type_no_ptp++;
424 else if (p->ptp_msgtype == RX_PTP_SYNC)
425 x->rx_ptp_type_sync++;
426 else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
427 x->rx_ptp_type_follow_up++;
428 else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
429 x->rx_ptp_type_delay_req++;
430 else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
431 x->rx_ptp_type_delay_resp++;
432 else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
433 x->rx_ptp_type_pdelay_req++;
434 else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
435 x->rx_ptp_type_pdelay_resp++;
436 else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
437 x->rx_ptp_type_pdelay_follow_up++;
438 else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
439 x->rx_ptp_announce++;
440 else if (p->ptp_msgtype == RX_PTP_MGMT)
441 x->rx_ptp_mgmt++;
442 else if (p->ptp_msgtype == RX_PTP_SIGNAL)
443 x->rx_ptp_signal++;
444 else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
445 x->rx_ptp_resv_msg_type++;
446}
447
/* Get rx timestamp status: an all-ones timestamp marks a corrupted
 * capture, reported as unavailable; otherwise return the descriptor's
 * timestamp-available flag.
 */
static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
{
	if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
		pr_err("Time stamp corrupted\n");
		return 0;
	}

	return p->tstamp_available;
}
458
459
460static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
461{
462 u64 ns;
463
464 ns = p->tstamp_lo;
465 ns |= ((u64)p->tstamp_hi) << 32;
466
467 return ns;
468}
469
/* Descriptor ops table exported through sxgbe_get_desc_ops() */
static const struct sxgbe_desc_ops desc_ops = {
	.init_tx_desc			= sxgbe_init_tx_desc,
	.tx_desc_enable_tse		= sxgbe_tx_desc_enable_tse,
	.prepare_tx_desc		= sxgbe_prepare_tx_desc,
	.tx_vlanctl_desc		= sxgbe_tx_vlanctl_desc,
	.set_tx_owner			= sxgbe_set_tx_owner,
	.get_tx_owner			= sxgbe_get_tx_owner,
	.close_tx_desc			= sxgbe_close_tx_desc,
	.release_tx_desc		= sxgbe_release_tx_desc,
	.clear_tx_ic			= sxgbe_clear_tx_ic,
	.get_tx_ls			= sxgbe_get_tx_ls,
	.get_tx_len			= sxgbe_get_tx_len,
	.tx_enable_tstamp		= sxgbe_tx_enable_tstamp,
	.get_tx_timestamp_status	= sxgbe_get_tx_timestamp_status,
	.tx_ctxt_desc_set_ctxt		= sxgbe_tx_ctxt_desc_set_ctxt,
	.tx_ctxt_desc_set_owner		= sxgbe_tx_ctxt_desc_set_owner,
	.get_tx_ctxt_owner		= sxgbe_tx_ctxt_desc_get_owner,
	.tx_ctxt_desc_set_mss		= sxgbe_tx_ctxt_desc_set_mss,
	.tx_ctxt_desc_get_mss		= sxgbe_tx_ctxt_desc_get_mss,
	.tx_ctxt_desc_set_tcmssv	= sxgbe_tx_ctxt_desc_set_tcmssv,
	.tx_ctxt_desc_reset_ostc	= sxgbe_tx_ctxt_desc_reset_ostc,
	.tx_ctxt_desc_set_ivlantag	= sxgbe_tx_ctxt_desc_set_ivlantag,
	.tx_ctxt_desc_get_ivlantag	= sxgbe_tx_ctxt_desc_get_ivlantag,
	.tx_ctxt_desc_set_vlantag	= sxgbe_tx_ctxt_desc_set_vlantag,
	.tx_ctxt_desc_get_vlantag	= sxgbe_tx_ctxt_desc_get_vlantag,
	.tx_ctxt_set_tstamp		= sxgbe_tx_ctxt_desc_set_tstamp,
	.close_tx_ctxt_desc		= sxgbe_tx_ctxt_desc_close,
	.get_tx_ctxt_cde		= sxgbe_tx_ctxt_desc_get_cde,
	.init_rx_desc			= sxgbe_init_rx_desc,
	.get_rx_owner			= sxgbe_get_rx_owner,
	.set_rx_owner			= sxgbe_set_rx_owner,
	.get_rx_frame_len		= sxgbe_get_rx_frame_len,
	.get_rx_fd_status		= sxgbe_get_rx_fd_status,
	.get_rx_ld_status		= sxgbe_get_rx_ld_status,
	.rx_wbstatus			= sxgbe_rx_wbstatus,
	.get_rx_ctxt_owner		= sxgbe_get_rx_ctxt_owner,
	.set_rx_ctxt_owner		= sxgbe_set_ctxt_rx_owner,
	.rx_ctxt_wbstatus		= sxgbe_rx_ctxt_wbstatus,
	.get_rx_ctxt_tstamp_status	= sxgbe_get_rx_ctxt_tstamp_status,
	.get_timestamp			= sxgbe_get_rx_timestamp,
};
511
/* Accessor for the (singleton, immutable) descriptor ops table */
const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void)
{
	return &desc_ops;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
new file mode 100644
index 000000000000..838cb9fb0ea9
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -0,0 +1,298 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_DESC_H__
13#define __SXGBE_DESC_H__
14
15#define SXGBE_DESC_SIZE_BYTES 16
16
17/* forward declaration */
18struct sxgbe_extra_stats;
19
/* Transmit checksum insertion control: checksum-offload mode the
 * driver writes into the TX descriptor's cksum_ctl (CIC) field.
 */
enum tdes_csum_insertion {
	cic_disabled = 0, /* Checksum Insertion Control */
	cic_only_ip = 1, /* Only IP header */
	/* IP header but pseudoheader is not calculated */
	cic_no_pseudoheader = 2,
	cic_full = 3, /* IP header and pseudoheader */
};
28
/* TX normal descriptor (hardware layout, 16 bytes). TDES0/1 hold the
 * buffer address; TDES2/3 are a union of the driver-programmed "read"
 * format and the hardware-written write-back format.
 */
struct sxgbe_tx_norm_desc {
	u64 tdes01; /* buf1 address */
	union {
		/* TX Read-Format Desc 2,3 */
		struct {
			/* TDES2 */
			u32 buf1_size:14;
			u32 vlan_tag_ctl:2;
			u32 buf2_size:14;
			u32 timestmp_enable:1;
			u32 int_on_com:1;
			/* TDES3 */
			union {
				/* TSO: TCP payload length; else total
				 * packet length plus checksum control
				 */
				u32 tcp_payload_len:18;
				struct {
					u32 total_pkt_len:15;
					u32 reserved1:1;
					u32 cksum_ctl:2;
				} cksum_pktlen;
			} tx_pkt_len;

			u32 tse_bit:1;
			u32 tcp_hdr_len:4;
			u32 sa_insert_ctl:3;
			u32 crc_pad_ctl:2;
			u32 last_desc:1;
			u32 first_desc:1;
			u32 ctxt_bit:1;
			u32 own_bit:1;
		} tx_rd_des23;

		/* tx write back Desc 2,3 */
		struct {
			/* WB TES2 */
			u32 reserved1;
			/* WB TES3 */
			u32 reserved2:31;
			u32 own_bit:1;
		} tx_wb_des23;
	} tdes23;
};
70
/* RX normal descriptor (hardware layout). Each word is a union of the
 * driver-programmed "read" format (buffer addresses) and the
 * hardware-written write-back format (VLAN tags, RSS hash, status).
 */
struct sxgbe_rx_norm_desc {
	union {
		u32 rdes0; /* buf1 address */
		struct {
			u32 out_vlan_tag:16;
			u32 in_vlan_tag:16;
		} wb_rx_des0;
	} rd_wb_des0;

	union {
		u32 rdes1;	/* buf2 address or buf1[63:32] */
		u32 rss_hash;	/* Write-back RX */
	} rd_wb_des1;

	union {
		/* RX Read format Desc 2,3 */
		struct{
			/* RDES2 */
			u32 buf2_addr;
			/* RDES3 */
			u32 buf2_hi_addr:30;
			u32 int_on_com:1;
			u32 own_bit:1;
		} rx_rd_des23;

		/* RX write back */
		struct{
			/* WB RDES2 */
			u32 hdr_len:10;
			u32 rdes2_reserved:2;
			u32 elrd_val:1;
			u32 iovt_sel:1;
			u32 res_pkt:1;
			u32 vlan_filter_match:1;
			u32 sa_filter_fail:1;
			u32 da_filter_fail:1;
			u32 hash_filter_pass:1;
			u32 macaddr_filter_match:8;
			u32 l3_filter_match:1;
			u32 l4_filter_match:1;
			u32 l34_filter_num:3;

			/* WB RDES3 */
			u32 pkt_len:14;
			u32 rdes3_reserved:1;
			u32 err_summary:1;
			u32 err_l2_type:4;
			u32 layer34_pkt_type:4;
			u32 no_coagulation_pkt:1;
			u32 in_seq_pkt:1;
			u32 rss_valid:1;
			u32 context_des_avail:1;
			u32 last_desc:1;
			u32 first_desc:1;
			u32 recv_context_desc:1;
			u32 own_bit:1;
		} rx_wb_des23;
	} rdes23;
};
130
/* TX context descriptor: carries out-of-band transmit state to the
 * hardware — launch timestamp, MSS for TSO and inner/outer VLAN tags.
 */
struct sxgbe_tx_ctxt_desc {
	u32 tstamp_lo;
	u32 tstamp_hi;
	u32 maxseg_size:15;
	u32 reserved1:1;
	u32 ivlan_tag:16;
	u32 vlan_tag:16;
	u32 vltag_valid:1;
	u32 ivlan_tag_valid:1;
	u32 ivlan_tag_ctl:2;
	u32 reserved2:3;
	u32 ctxt_desc_err:1;
	u32 reserved3:2;
	u32 ostc:1;
	u32 tcmssv:1;
	u32 reserved4:2;
	u32 ctxt_bit:1;
	u32 own_bit:1;
};
151
/* RX context descriptor: written back by the hardware with the
 * receive timestamp and PTP message information.
 */
struct sxgbe_rx_ctxt_desc {
	u32 tstamp_lo;
	u32 tstamp_hi;
	u32 reserved1;
	u32 ptp_msgtype:4;
	u32 tstamp_available:1;
	u32 ptp_rsp_err:1;
	u32 tstamp_dropped:1;
	u32 reserved2:23;
	u32 rx_ctxt_desc:1;
	u32 own_bit:1;
};
164
/* Descriptor manipulation callbacks, implemented in sxgbe_desc.c and
 * obtained through sxgbe_get_desc_ops(). They abstract the hardware
 * TX/RX normal and context descriptor formats defined above.
 */
struct sxgbe_desc_ops {
	/* DMA TX descriptor ring initialization */
	void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);

	/* Invoked by the xmit function to prepare the tx descriptor */
	void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
				   u32 total_hdr_len, u32 tcp_hdr_len,
				   u32 tcp_payload_len);

	/* Assign buffer lengths for descriptor */
	void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
				int buf1_len, int pkt_len, int cksum);

	/* Set VLAN control information */
	void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);

	/* Set the owner of the descriptor */
	void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);

	/* Get the owner of the descriptor */
	int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);

	/* Invoked by the xmit function to close the tx descriptor */
	void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);

	/* Clean the tx descriptor as soon as the tx irq is received */
	void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);

	/* Clear interrupt on tx frame completion. When this bit is
	 * set an interrupt happens as soon as the frame is transmitted
	 */
	void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);

	/* Last tx segment reports the transmit status */
	int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);

	/* Get the buffer size from the descriptor */
	int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);

	/* Set tx timestamp enable bit */
	void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);

	/* get tx timestamp status */
	int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);

	/* TX Context Descriptor specific */
	void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);

	/* Set the owner of the TX context descriptor */
	void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);

	/* Get the owner of the TX context descriptor */
	int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);

	/* Set TX mss */
	void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);

	/* Get TX mss */
	int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);

	/* Set TX tcmssv */
	void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);

	/* Reset TX ostc */
	void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);

	/* Set IVLAN information */
	void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
					  int is_ivlanvalid, int ivlan_tag,
					  int ivlan_ctl);

	/* Return IVLAN Tag */
	int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);

	/* Set VLAN Tag */
	void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
					 int is_vlanvalid, int vlan_tag);

	/* Return VLAN Tag */
	int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);

	/* Set Time stamp */
	void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
				   u8 ostc_enable, u64 tstamp);

	/* Close TX context descriptor */
	void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);

	/* WB status of context descriptor */
	int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);

	/* DMA RX descriptor ring initialization */
	void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
			     int mode, int end);

	/* Get own bit */
	int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);

	/* Set own bit */
	void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);

	/* Get the receive frame size */
	int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);

	/* Return first Descriptor status */
	int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);

	/* Return last Descriptor status */
	int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);

	/* Return the reception status looking at the RDES1 */
	int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
			   struct sxgbe_extra_stats *x, int *checksum);

	/* Get own bit */
	int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);

	/* Set own bit */
	void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);

	/* Return the reception status looking at Context control information */
	void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
				 struct sxgbe_extra_stats *x);

	/* Get rx timestamp status */
	int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);

	/* Get timestamp value for rx, need to check this */
	u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
};
295
296const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
297
298#endif /* __SXGBE_DESC_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644
index 000000000000..28f89c41d0cd
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -0,0 +1,382 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/io.h>
13#include <linux/delay.h>
14#include <linux/export.h>
15#include <linux/io.h>
16#include <linux/netdevice.h>
17#include <linux/phy.h>
18
19#include "sxgbe_common.h"
20#include "sxgbe_dma.h"
21#include "sxgbe_reg.h"
22#include "sxgbe_desc.h"
23
/* DMA core initialization: soft-reset the DMA engine and program the
 * AXI bus mode (undefined-burst flag and allowed burst-length map).
 * Returns 0 on success, -EBUSY if the reset never completes.
 */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	int retry_count = 10;
	u32 reg_val;

	/* reset the DMA; hardware clears the soft-reset bit when done */
	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	/* post-decrement leaves retry_count at -1 only when the loop
	 * exhausted all retries without seeing the bit clear
	 */
	if (retry_count < 0)
		return -EBUSY;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
	 * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
	 * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}
59
60static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
61 int fix_burst, int pbl, dma_addr_t dma_tx,
62 dma_addr_t dma_rx, int t_rsize, int r_rsize)
63{
64 u32 reg_val;
65 dma_addr_t dma_addr;
66
67 reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
68 /* set the pbl */
69 if (fix_burst) {
70 reg_val |= SXGBE_DMA_PBL_X8MODE;
71 writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
72 /* program the TX pbl */
73 reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
74 reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
75 writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
76 /* program the RX pbl */
77 reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
78 reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
79 writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
80 }
81
82 /* program desc registers */
83 writel(upper_32_bits(dma_tx),
84 ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
85 writel(lower_32_bits(dma_tx),
86 ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
87
88 writel(upper_32_bits(dma_rx),
89 ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
90 writel(lower_32_bits(dma_rx),
91 ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
92
93 /* program tail pointers */
94 /* assumption: upper 32 bits are constant and
95 * same as TX/RX desc list
96 */
97 dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
98 writel(lower_32_bits(dma_addr),
99 ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
100
101 dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
102 writel(lower_32_bits(dma_addr),
103 ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
104 /* program the ring sizes */
105 writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
106 writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
107
108 /* Enable TX/RX interrupts */
109 writel(SXGBE_DMA_ENA_INT,
110 ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
111}
112
113static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
114{
115 u32 tx_config;
116
117 tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
118 tx_config |= SXGBE_TX_START_DMA;
119 writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
120}
121
/* Unmask this DMA channel's interrupt sources. */
static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
128
/* Mask all of this DMA channel's interrupt sources. */
static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
134
135static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
136{
137 int cnum;
138 u32 tx_ctl_reg;
139
140 for (cnum = 0; cnum < tchannels; cnum++) {
141 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
142 tx_ctl_reg |= SXGBE_TX_ENABLE;
143 writel(tx_ctl_reg,
144 ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
145 }
146}
147
148static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
149{
150 u32 tx_ctl_reg;
151
152 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
153 tx_ctl_reg |= SXGBE_TX_ENABLE;
154 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
155}
156
157static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
158{
159 u32 tx_ctl_reg;
160
161 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
162 tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
163 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
164}
165
166static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
167{
168 int cnum;
169 u32 tx_ctl_reg;
170
171 for (cnum = 0; cnum < tchannels; cnum++) {
172 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
173 tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
174 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
175 }
176}
177
178static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
179{
180 int cnum;
181 u32 rx_ctl_reg;
182
183 for (cnum = 0; cnum < rchannels; cnum++) {
184 rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
185 rx_ctl_reg |= SXGBE_RX_ENABLE;
186 writel(rx_ctl_reg,
187 ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
188 }
189}
190
191static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
192{
193 int cnum;
194 u32 rx_ctl_reg;
195
196 for (cnum = 0; cnum < rchannels; cnum++) {
197 rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
198 rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
199 writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
200 }
201}
202
/* Decode and acknowledge one channel's TX DMA interrupt status.
 * Updates the counters in @x, clears the serviced status bits in the
 * channel status register, and returns a bitmask of handle_tx,
 * tx_bump_tc and tx_hard_error events for the caller to act on.
 */
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and cleared when
			 * the respective error bits cleared
			 */

			/* check for actual cause; each TEBn bit picks
			 * one of two causes, so only set bits are
			 * added to clear_val
			 */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
274
/* Decode and acknowledge one channel's RX DMA interrupt status.
 * Updates the counters in @x, clears the serviced status bits in the
 * channel status register, and returns a bitmask of handle_rx,
 * rx_bump_tc and rx_hard_error events for the caller to act on.
 */
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and cleared when
			 * the respective error bits cleared
			 */

			/* check for actual cause; each REBn bit picks
			 * one of two causes, so only set bits are
			 * added to clear_val
			 */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
340
/* Program the HW RX Watchdog: write the same interrupt watchdog timer
 * value (register units, not microseconds) to every RX queue.
 */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}
351
352static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
353{
354 u32 ctrl;
355
356 ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
357 ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
358 writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
359}
360
/* DMA callbacks wired to this file's implementations; exported to the
 * rest of the driver through sxgbe_get_dma_ops().
 */
static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init = sxgbe_dma_init,
	.cha_init = sxgbe_dma_channel_init,
	.enable_dma_transmission = sxgbe_enable_dma_transmission,
	.enable_dma_irq = sxgbe_enable_dma_irq,
	.disable_dma_irq = sxgbe_disable_dma_irq,
	.start_tx = sxgbe_dma_start_tx,
	.start_tx_queue = sxgbe_dma_start_tx_queue,
	.stop_tx = sxgbe_dma_stop_tx,
	.stop_tx_queue = sxgbe_dma_stop_tx_queue,
	.start_rx = sxgbe_dma_start_rx,
	.stop_rx = sxgbe_dma_stop_rx,
	.tx_dma_int_status = sxgbe_tx_dma_int_status,
	.rx_dma_int_status = sxgbe_rx_dma_int_status,
	.rx_watchdog = sxgbe_dma_rx_watchdog,
	.enable_tso = sxgbe_enable_tso,
};
378
/* Return the shared, read-only DMA-operations table. */
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
new file mode 100644
index 000000000000..1607b54c9bb0
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
@@ -0,0 +1,50 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_DMA_H__
13#define __SXGBE_DMA_H__
14
15/* forward declaration */
16struct sxgbe_extra_stats;
17
18#define SXGBE_DMA_BLENMAP_LSHIFT 1
19#define SXGBE_DMA_TXPBL_LSHIFT 16
20#define SXGBE_DMA_RXPBL_LSHIFT 16
21#define DEFAULT_DMA_PBL 8
22
23struct sxgbe_dma_ops {
24 /* DMA core initialization */
25 int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
26 void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
27 int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
28 int t_rzie, int r_rsize);
29 void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
30 void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
31 void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
32 void (*start_tx)(void __iomem *ioaddr, int tchannels);
33 void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
34 void (*stop_tx)(void __iomem *ioaddr, int tchannels);
35 void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
36 void (*start_rx)(void __iomem *ioaddr, int rchannels);
37 void (*stop_rx)(void __iomem *ioaddr, int rchannels);
38 int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
39 struct sxgbe_extra_stats *x);
40 int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
41 struct sxgbe_extra_stats *x);
42 /* Program the HW RX Watchdog */
43 void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
44 /* Enable TSO for each DMA channel */
45 void (*enable_tso)(void __iomem *ioaddr, u8 chan_num);
46};
47
48const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
49
#endif /* __SXGBE_DMA_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644
index 000000000000..0415fa50eeb7
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -0,0 +1,524 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/clk.h>
16#include <linux/interrupt.h>
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/net_tstamp.h>
20#include <linux/phy.h>
21#include <linux/ptp_clock_kernel.h>
22
23#include "sxgbe_common.h"
24#include "sxgbe_reg.h"
25#include "sxgbe_dma.h"
26
/* One ethtool statistic: the name shown by "ethtool -S", the field's
 * size inside sxgbe_extra_stats, and its byte offset inside
 * sxgbe_priv_data (via the embedded xstats member).
 */
struct sxgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};
32
33#define SXGBE_STAT(m) \
34{ \
35 #m, \
36 FIELD_SIZEOF(struct sxgbe_extra_stats, m), \
37 offsetof(struct sxgbe_priv_data, xstats.m) \
38}
39
/* Table of counters exported via "ethtool -S"; every entry names one
 * field of struct sxgbe_extra_stats (see SXGBE_STAT above).
 */
static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
	/* TX/RX IRQ events */
	SXGBE_STAT(tx_process_stopped_irq),
	SXGBE_STAT(tx_ctxt_desc_err),
	SXGBE_STAT(tx_threshold),
	SXGBE_STAT(rx_threshold),
	SXGBE_STAT(tx_pkt_n),
	SXGBE_STAT(rx_pkt_n),
	SXGBE_STAT(normal_irq_n),
	SXGBE_STAT(tx_normal_irq_n),
	SXGBE_STAT(rx_normal_irq_n),
	SXGBE_STAT(napi_poll),
	SXGBE_STAT(tx_clean),
	SXGBE_STAT(tx_reset_ic_bit),
	SXGBE_STAT(rx_process_stopped_irq),
	SXGBE_STAT(rx_underflow_irq),

	/* Bus access errors */
	SXGBE_STAT(fatal_bus_error_irq),
	SXGBE_STAT(tx_read_transfer_err),
	SXGBE_STAT(tx_write_transfer_err),
	SXGBE_STAT(tx_desc_access_err),
	SXGBE_STAT(tx_buffer_access_err),
	SXGBE_STAT(tx_data_transfer_err),
	SXGBE_STAT(rx_read_transfer_err),
	SXGBE_STAT(rx_write_transfer_err),
	SXGBE_STAT(rx_desc_access_err),
	SXGBE_STAT(rx_buffer_access_err),
	SXGBE_STAT(rx_data_transfer_err),

	/* EEE-LPI stats */
	SXGBE_STAT(tx_lpi_entry_n),
	SXGBE_STAT(tx_lpi_exit_n),
	SXGBE_STAT(rx_lpi_entry_n),
	SXGBE_STAT(rx_lpi_exit_n),
	SXGBE_STAT(eee_wakeup_error_n),

	/* RX specific */
	/* L2 error */
	SXGBE_STAT(rx_code_gmii_err),
	SXGBE_STAT(rx_watchdog_err),
	SXGBE_STAT(rx_crc_err),
	SXGBE_STAT(rx_gaint_pkt_err),
	SXGBE_STAT(ip_hdr_err),
	SXGBE_STAT(ip_payload_err),
	SXGBE_STAT(overflow_error),

	/* L2 Pkt type */
	SXGBE_STAT(len_pkt),
	SXGBE_STAT(mac_ctl_pkt),
	SXGBE_STAT(dcb_ctl_pkt),
	SXGBE_STAT(arp_pkt),
	SXGBE_STAT(oam_pkt),
	SXGBE_STAT(untag_okt),
	SXGBE_STAT(other_pkt),
	SXGBE_STAT(svlan_tag_pkt),
	SXGBE_STAT(cvlan_tag_pkt),
	SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
	SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
	SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
	SXGBE_STAT(dvan_ocvlan_icvlan_pkt),

	/* L3/L4 Pkt type */
	SXGBE_STAT(not_ip_pkt),
	SXGBE_STAT(ip4_tcp_pkt),
	SXGBE_STAT(ip4_udp_pkt),
	SXGBE_STAT(ip4_icmp_pkt),
	SXGBE_STAT(ip4_unknown_pkt),
	SXGBE_STAT(ip6_tcp_pkt),
	SXGBE_STAT(ip6_udp_pkt),
	SXGBE_STAT(ip6_icmp_pkt),
	SXGBE_STAT(ip6_unknown_pkt),

	/* Filter specific */
	SXGBE_STAT(vlan_filter_match),
	SXGBE_STAT(sa_filter_fail),
	SXGBE_STAT(da_filter_fail),
	SXGBE_STAT(hash_filter_pass),
	SXGBE_STAT(l3_filter_match),
	SXGBE_STAT(l4_filter_match),

	/* RX context specific */
	SXGBE_STAT(timestamp_dropped),
	SXGBE_STAT(rx_msg_type_no_ptp),
	SXGBE_STAT(rx_ptp_type_sync),
	SXGBE_STAT(rx_ptp_type_follow_up),
	SXGBE_STAT(rx_ptp_type_delay_req),
	SXGBE_STAT(rx_ptp_type_delay_resp),
	SXGBE_STAT(rx_ptp_type_pdelay_req),
	SXGBE_STAT(rx_ptp_type_pdelay_resp),
	SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
	SXGBE_STAT(rx_ptp_announce),
	SXGBE_STAT(rx_ptp_mgmt),
	SXGBE_STAT(rx_ptp_signal),
	SXGBE_STAT(rx_ptp_resv_msg_type),
};
136#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
137
138static int sxgbe_get_eee(struct net_device *dev,
139 struct ethtool_eee *edata)
140{
141 struct sxgbe_priv_data *priv = netdev_priv(dev);
142
143 if (!priv->hw_cap.eee)
144 return -EOPNOTSUPP;
145
146 edata->eee_enabled = priv->eee_enabled;
147 edata->eee_active = priv->eee_active;
148 edata->tx_lpi_timer = priv->tx_lpi_timer;
149
150 return phy_ethtool_get_eee(priv->phydev, edata);
151}
152
/* ethtool set_eee handler: enable or disable Energy Efficient
 * Ethernet. Note the ordering: eee_enabled is set optimistically and
 * then validated by sxgbe_eee_init(), which may turn it back off.
 */
static int sxgbe_set_eee(struct net_device *dev,
			 struct ethtool_eee *edata)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	priv->eee_enabled = edata->eee_enabled;

	if (!priv->eee_enabled) {
		sxgbe_disable_eee_mode(priv);
	} else {
		/* We are asking for enabling the EEE but it is safe
		 * to verify all by invoking the eee_init function.
		 * In case of failure it will return an error.
		 */
		priv->eee_enabled = sxgbe_eee_init(priv);
		if (!priv->eee_enabled)
			return -EOPNOTSUPP;

		/* Do not change tx_lpi_timer in case of failure */
		priv->tx_lpi_timer = edata->tx_lpi_timer;
	}

	return phy_ethtool_set_eee(priv->phydev, edata);
}
177
178static void sxgbe_getdrvinfo(struct net_device *dev,
179 struct ethtool_drvinfo *info)
180{
181 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
182 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
183}
184
185static int sxgbe_getsettings(struct net_device *dev,
186 struct ethtool_cmd *cmd)
187{
188 struct sxgbe_priv_data *priv = netdev_priv(dev);
189
190 if (priv->phydev)
191 return phy_ethtool_gset(priv->phydev, cmd);
192
193 return -EOPNOTSUPP;
194}
195
196static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{
198 struct sxgbe_priv_data *priv = netdev_priv(dev);
199
200 if (priv->phydev)
201 return phy_ethtool_sset(priv->phydev, cmd);
202
203 return -EOPNOTSUPP;
204}
205
206static u32 sxgbe_getmsglevel(struct net_device *dev)
207{
208 struct sxgbe_priv_data *priv = netdev_priv(dev);
209 return priv->msg_enable;
210}
211
212static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
213{
214 struct sxgbe_priv_data *priv = netdev_priv(dev);
215 priv->msg_enable = level;
216}
217
218static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
219{
220 int i;
221 u8 *p = data;
222
223 switch (stringset) {
224 case ETH_SS_STATS:
225 for (i = 0; i < SXGBE_STATS_LEN; i++) {
226 memcpy(p, sxgbe_gstrings_stats[i].stat_string,
227 ETH_GSTRING_LEN);
228 p += ETH_GSTRING_LEN;
229 }
230 break;
231 default:
232 WARN_ON(1);
233 break;
234 }
235}
236
237static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
238{
239 int len;
240
241 switch (sset) {
242 case ETH_SS_STATS:
243 len = SXGBE_STATS_LEN;
244 return len;
245 default:
246 return -EINVAL;
247 }
248}
249
/* ethtool -S handler: copy every counter listed in
 * sxgbe_gstrings_stats out of priv (via the recorded byte offsets)
 * into @data. EEE wakeup errors are refreshed from the PHY first
 * whenever EEE is enabled.
 */
static void sxgbe_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *dummy, u64 *data)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int i;
	char *p;

	if (priv->eee_enabled) {
		int val = phy_get_eee_err(priv->phydev);

		if (val)
			priv->xstats.eee_wakeup_error_n = val;
	}

	for (i = 0; i < SXGBE_STATS_LEN; i++) {
		/* stat fields are either u64 or u32 wide; pick the
		 * read width from the recorded sizeof_stat
		 */
		p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
		data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
			? (*(u64 *)p) : (*(u32 *)p);
	}
}
270
271static void sxgbe_get_channels(struct net_device *dev,
272 struct ethtool_channels *channel)
273{
274 channel->max_rx = SXGBE_MAX_RX_CHANNELS;
275 channel->max_tx = SXGBE_MAX_TX_CHANNELS;
276 channel->rx_count = SXGBE_RX_QUEUES;
277 channel->tx_count = SXGBE_TX_QUEUES;
278}
279
280static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
281{
282 unsigned long clk = clk_get_rate(priv->sxgbe_clk);
283
284 if (!clk)
285 return 0;
286
287 return (riwt * 256) / (clk / 1000000);
288}
289
290static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
291{
292 unsigned long clk = clk_get_rate(priv->sxgbe_clk);
293
294 if (!clk)
295 return 0;
296
297 return (usec * (clk / 1000000)) / 256;
298}
299
300static int sxgbe_get_coalesce(struct net_device *dev,
301 struct ethtool_coalesce *ec)
302{
303 struct sxgbe_priv_data *priv = netdev_priv(dev);
304
305 if (priv->use_riwt)
306 ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);
307
308 return 0;
309}
310
311static int sxgbe_set_coalesce(struct net_device *dev,
312 struct ethtool_coalesce *ec)
313{
314 struct sxgbe_priv_data *priv = netdev_priv(dev);
315 unsigned int rx_riwt;
316
317 if (!ec->rx_coalesce_usecs)
318 return -EINVAL;
319
320 rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);
321
322 if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
323 return -EINVAL;
324 else if (!priv->use_riwt)
325 return -EOPNOTSUPP;
326
327 priv->rx_riwt = rx_riwt;
328 priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
329
330 return 0;
331}
332
/* Report which header fields feed the RSS hash for @cmd->flow_type.
 * TCP/UDP flows add the L4 port bits and then deliberately fall
 * through to pick up the IP src/dst bits shared by all IP flows.
 */
static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on sxgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
366
367static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
368 u32 *rule_locs)
369{
370 struct sxgbe_priv_data *priv = netdev_priv(dev);
371 int ret = -EOPNOTSUPP;
372
373 switch (cmd->cmd) {
374 case ETHTOOL_GRXFH:
375 ret = sxgbe_get_rss_hash_opts(priv, cmd);
376 break;
377 default:
378 break;
379 }
380
381 return ret;
382}
383
384static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
385 struct ethtool_rxnfc *cmd)
386{
387 u32 reg_val = 0;
388
389 /* RSS does not support anything other than hashing
390 * to queues on src and dst IPs and ports
391 */
392 if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
393 RXH_L4_B_0_1 | RXH_L4_B_2_3))
394 return -EINVAL;
395
396 switch (cmd->flow_type) {
397 case TCP_V4_FLOW:
398 case TCP_V6_FLOW:
399 if (!(cmd->data & RXH_IP_SRC) ||
400 !(cmd->data & RXH_IP_DST) ||
401 !(cmd->data & RXH_L4_B_0_1) ||
402 !(cmd->data & RXH_L4_B_2_3))
403 return -EINVAL;
404 reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
405 break;
406 case UDP_V4_FLOW:
407 case UDP_V6_FLOW:
408 if (!(cmd->data & RXH_IP_SRC) ||
409 !(cmd->data & RXH_IP_DST) ||
410 !(cmd->data & RXH_L4_B_0_1) ||
411 !(cmd->data & RXH_L4_B_2_3))
412 return -EINVAL;
413 reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
414 break;
415 case SCTP_V4_FLOW:
416 case AH_ESP_V4_FLOW:
417 case AH_V4_FLOW:
418 case ESP_V4_FLOW:
419 case AH_ESP_V6_FLOW:
420 case AH_V6_FLOW:
421 case ESP_V6_FLOW:
422 case SCTP_V6_FLOW:
423 case IPV4_FLOW:
424 case IPV6_FLOW:
425 if (!(cmd->data & RXH_IP_SRC) ||
426 !(cmd->data & RXH_IP_DST) ||
427 (cmd->data & RXH_L4_B_0_1) ||
428 (cmd->data & RXH_L4_B_2_3))
429 return -EINVAL;
430 reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
431 break;
432 default:
433 return -EINVAL;
434 }
435
436 /* Read SXGBE RSS control register and update */
437 reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
438 writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
439 readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
440
441 return 0;
442}
443
444static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
445{
446 struct sxgbe_priv_data *priv = netdev_priv(dev);
447 int ret = -EOPNOTSUPP;
448
449 switch (cmd->cmd) {
450 case ETHTOOL_SRXFH:
451 ret = sxgbe_set_rss_hash_opt(priv, cmd);
452 break;
453 default:
454 break;
455 }
456
457 return ret;
458}
459
460static void sxgbe_get_regs(struct net_device *dev,
461 struct ethtool_regs *regs, void *space)
462{
463 struct sxgbe_priv_data *priv = netdev_priv(dev);
464 u32 *reg_space = (u32 *)space;
465 int reg_offset;
466 int reg_ix = 0;
467 void __iomem *ioaddr = priv->ioaddr;
468
469 memset(reg_space, 0x0, REG_SPACE_SIZE);
470
471 /* MAC registers */
472 for (reg_offset = START_MAC_REG_OFFSET;
473 reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
474 reg_space[reg_ix] = readl(ioaddr + reg_offset);
475 reg_ix++;
476 }
477
478 /* MTL registers */
479 for (reg_offset = START_MTL_REG_OFFSET;
480 reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
481 reg_space[reg_ix] = readl(ioaddr + reg_offset);
482 reg_ix++;
483 }
484
485 /* DMA registers */
486 for (reg_offset = START_DMA_REG_OFFSET;
487 reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
488 reg_space[reg_ix] = readl(ioaddr + reg_offset);
489 reg_ix++;
490 }
491
492 BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
493}
494
/* Size in bytes of the register dump produced by sxgbe_get_regs(). */
static int sxgbe_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}
499
/* ethtool entry points exported by this driver; attached to each netdev
 * via sxgbe_set_ethtool_ops() below.
 */
static const struct ethtool_ops sxgbe_ethtool_ops = {
	.get_drvinfo = sxgbe_getdrvinfo,
	.get_settings = sxgbe_getsettings,
	.set_settings = sxgbe_setsettings,
	.get_msglevel = sxgbe_getmsglevel,
	.set_msglevel = sxgbe_setmsglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = sxgbe_get_strings,
	.get_ethtool_stats = sxgbe_get_ethtool_stats,
	.get_sset_count = sxgbe_get_sset_count,
	.get_channels = sxgbe_get_channels,
	.get_coalesce = sxgbe_get_coalesce,
	.set_coalesce = sxgbe_set_coalesce,
	.get_rxnfc = sxgbe_get_rxnfc,
	.set_rxnfc = sxgbe_set_rxnfc,
	.get_regs = sxgbe_get_regs,
	.get_regs_len = sxgbe_get_regs_len,
	.get_eee = sxgbe_get_eee,
	.set_eee = sxgbe_set_eee,
};
520
/* Attach the driver's ethtool operations to a net device. */
void sxgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
new file mode 100644
index 000000000000..a72688e8dc6c
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -0,0 +1,2317 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/clk.h>
16#include <linux/crc32.h>
17#include <linux/dma-mapping.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/if.h>
21#include <linux/if_ether.h>
22#include <linux/if_vlan.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/ip.h>
26#include <linux/kernel.h>
27#include <linux/mii.h>
28#include <linux/module.h>
29#include <linux/net_tstamp.h>
30#include <linux/netdevice.h>
31#include <linux/phy.h>
32#include <linux/platform_device.h>
33#include <linux/prefetch.h>
34#include <linux/skbuff.h>
35#include <linux/slab.h>
36#include <linux/tcp.h>
37#include <linux/sxgbe_platform.h>
38
39#include "sxgbe_common.h"
40#include "sxgbe_desc.h"
41#include "sxgbe_dma.h"
42#include "sxgbe_mtl.h"
43#include "sxgbe_reg.h"
44
#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000	/* maximum jumbo frame payload supported */

/* Module parameters */
#define TX_TIMEO	5000	/* netdev TX watchdog timeout, in ms */
#define DMA_TX_SIZE	512	/* default TX descriptor ring length */
#define DMA_RX_SIZE	1024	/* default RX descriptor ring length */
#define TC_DEFAULT	64	/* default threshold-mode TC value */
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification 1 sec(1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER	1000

/* -1 selects default_msg_level below instead of a user mask */
static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, S_IRUGO | S_IWUSR);

module_param(debug, int, S_IRUGO | S_IWUSR);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

/* Interrupt handlers, defined later in this file */
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

/* usec -> absolute jiffies deadline for the TX coalescing timer */
#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/* msec -> absolute jiffies deadline for the EEE LPI timer */
#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
74
75/**
76 * sxgbe_verify_args - verify the driver parameters.
77 * Description: it verifies if some wrong parameter is passed to the driver.
78 * Note that wrong parameters are replaced with the default values.
79 */
80static void sxgbe_verify_args(void)
81{
82 if (unlikely(eee_timer < 0))
83 eee_timer = SXGBE_DEFAULT_LPI_TIMER;
84}
85
86static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
87{
88 /* Check and enter in LPI mode */
89 if (!priv->tx_path_in_lpi_mode)
90 priv->hw->mac->set_eee_mode(priv->ioaddr);
91}
92
/* Take the TX path out of LPI: reset the MAC's EEE mode, stop the LPI
 * control timer and clear the software LPI flag.
 */
void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
100
/**
 * sxgbe_eee_ctrl_timer
 * @arg: pointer to the driver's sxgbe_priv_data, passed as timer data
 * Description:
 * Periodic LPI timer callback: if there is no data transfer and we are
 * not already in LPI state, move the MAC transmitter into LPI, then
 * re-arm the timer for the next eee_timer interval.
 */
static void sxgbe_eee_ctrl_timer(unsigned long arg)
{
	struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;

	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}
115
116/**
117 * sxgbe_eee_init
118 * @priv: private device pointer
119 * Description:
120 * If the EEE support has been enabled while configuring the driver,
121 * if the GMAC actually supports the EEE (from the HW cap reg) and the
122 * phy can also manage EEE, so enable the LPI state and start the timer
123 * to verify if the tx path can enter in LPI state.
124 */
125bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
126{
127 bool ret = false;
128
129 /* MAC core supports the EEE feature. */
130 if (priv->hw_cap.eee) {
131 /* Check if the PHY supports EEE */
132 if (phy_init_eee(priv->phydev, 1))
133 return false;
134
135 priv->eee_active = 1;
136 init_timer(&priv->eee_ctrl_timer);
137 priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
138 priv->eee_ctrl_timer.data = (unsigned long)priv;
139 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
140 add_timer(&priv->eee_ctrl_timer);
141
142 priv->hw->mac->set_eee_timer(priv->ioaddr,
143 SXGBE_DEFAULT_LPI_TIMER,
144 priv->tx_lpi_timer);
145
146 pr_info("Energy-Efficient Ethernet initialized\n");
147
148 ret = true;
149 }
150
151 return ret;
152}
153
154static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
155{
156 /* When the EEE has been already initialised we have to
157 * modify the PLS bit in the LPI ctrl & status reg according
158 * to the PHY link status. For this reason.
159 */
160 if (priv->eee_enabled)
161 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
162}
163
/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 */
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdio communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
	/* NOTE(review): rates above SXGBE_CSR_F_500M leave clk_csr
	 * unchanged - confirm whether that is intended.
	 */
}
190
/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	(x->dma_tx_size/4)

/* Number of free slots in a TX ring.  cur_tx and dirty_tx are
 * free-running counters (slots are taken modulo the ring size), so a
 * plain difference works here; one slot is kept unused to tell a full
 * ring from an empty one.
 */
static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}
198
199/**
200 * sxgbe_adjust_link
201 * @dev: net device structure
202 * Description: it adjusts the link parameters.
203 */
204static void sxgbe_adjust_link(struct net_device *dev)
205{
206 struct sxgbe_priv_data *priv = netdev_priv(dev);
207 struct phy_device *phydev = priv->phydev;
208 u8 new_state = 0;
209 u8 speed = 0xff;
210
211 if (!phydev)
212 return;
213
214 /* SXGBE is not supporting auto-negotiation and
215 * half duplex mode. so, not handling duplex change
216 * in this function. only handling speed and link status
217 */
218 if (phydev->link) {
219 if (phydev->speed != priv->speed) {
220 new_state = 1;
221 switch (phydev->speed) {
222 case SPEED_10000:
223 speed = SXGBE_SPEED_10G;
224 break;
225 case SPEED_2500:
226 speed = SXGBE_SPEED_2_5G;
227 break;
228 case SPEED_1000:
229 speed = SXGBE_SPEED_1G;
230 break;
231 default:
232 netif_err(priv, link, dev,
233 "Speed (%d) not supported\n",
234 phydev->speed);
235 }
236
237 priv->speed = phydev->speed;
238 priv->hw->mac->set_speed(priv->ioaddr, speed);
239 }
240
241 if (!priv->oldlink) {
242 new_state = 1;
243 priv->oldlink = 1;
244 }
245 } else if (priv->oldlink) {
246 new_state = 1;
247 priv->oldlink = 0;
248 priv->speed = SPEED_UNKNOWN;
249 }
250
251 if (new_state & netif_msg_link(priv))
252 phy_print_status(phydev);
253
254 /* Alter the MAC settings for EEE */
255 sxgbe_eee_adjust(priv);
256}
257
/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success, negative errno on failure
 */
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	/* Build the MII bus id, either from the platform-provided bus name
	 * or from the driver's default "sxgbe-<bus>" scheme.
	 */
	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	/* sxgbe_adjust_link() will be called on every PHY state change */
	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);
	/* A zero phy_id means nothing answered at the address: give up */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	/* save phy device in private structure */
	priv->phydev = phydev;

	return 0;
}
315
316/**
317 * sxgbe_clear_descriptors: clear descriptors
318 * @priv: driver private structure
319 * Description: this function is called to clear the tx and rx descriptors
320 * in case of both basic and extended descriptors are used.
321 */
322static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
323{
324 int i, j;
325 unsigned int txsize = priv->dma_tx_size;
326 unsigned int rxsize = priv->dma_rx_size;
327
328 /* Clear the Rx/Tx descriptors */
329 for (j = 0; j < SXGBE_RX_QUEUES; j++) {
330 for (i = 0; i < rxsize; i++)
331 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
332 priv->use_riwt, priv->mode,
333 (i == rxsize - 1));
334 }
335
336 for (j = 0; j < SXGBE_TX_QUEUES; j++) {
337 for (i = 0; i < txsize; i++)
338 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
339 }
340}
341
/* Allocate and DMA-map one receive skb for ring slot @i and publish the
 * mapping in the descriptor's buffer-2 address field.
 * Returns 0 on success, -ENOMEM/-EINVAL on allocation or mapping failure
 * (the skb is released on mapping failure).
 */
static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	/* hand the buffer address to the hardware descriptor */
	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}
/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: device structure used for the allocations
 * @queue_no: queue number this ring belongs to
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function initializes the DMA TX descriptor ring:
 * coherent descriptor memory plus the skb/dma bookkeeping arrays.
 */
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_zalloc_coherent(dev,
					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					      &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff array (devm: freed automatically
	 * on driver detach, hence no explicit kfree on the error path)
	 */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);

	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	/* initialise TX queue lock */
	spin_lock_init(&tx_ring->tx_lock);

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}
420
/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: device structure used for the original allocations
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: releases the coherent RX descriptor memory and the
 * skb/dma bookkeeping arrays of one RX queue.
 */
void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
		  int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}
436
437/**
438 * init_rx_ring - init the RX descriptor ring
439 * @dev: net device structure
440 * @rx_ring: ring to be intialised
441 * @rx_rsize: ring size
442 * Description: this function initializes the DMA RX descriptor
443 */
444static int init_rx_ring(struct net_device *dev, u8 queue_no,
445 struct sxgbe_rx_queue *rx_ring, int rx_rsize)
446{
447 struct sxgbe_priv_data *priv = netdev_priv(dev);
448 int desc_index;
449 unsigned int bfsize = 0;
450 unsigned int ret = 0;
451
452 /* Set the max buffer size according to the MTU. */
453 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
454
455 netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
456
457 /* RX ring is not allcoated */
458 if (rx_ring == NULL) {
459 netdev_err(dev, "No memory for RX queue\n");
460 goto error;
461 }
462
463 /* assign queue number */
464 rx_ring->queue_no = queue_no;
465
466 /* allocate memory for RX descriptors */
467 rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
468 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
469 &rx_ring->dma_rx_phy, GFP_KERNEL);
470
471 if (rx_ring->dma_rx == NULL)
472 goto error;
473
474 /* allocate memory for RX skbuff array */
475 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
476 sizeof(dma_addr_t), GFP_KERNEL);
477 if (rx_ring->rx_skbuff_dma == NULL)
478 goto dmamem_err;
479
480 rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
481 sizeof(struct sk_buff *), GFP_KERNEL);
482 if (rx_ring->rx_skbuff == NULL)
483 goto rxbuff_err;
484
485 /* initialise the buffers */
486 for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
487 struct sxgbe_rx_norm_desc *p;
488 p = rx_ring->dma_rx + desc_index;
489 ret = sxgbe_init_rx_buffers(dev, p, desc_index,
490 bfsize, rx_ring);
491 if (ret)
492 goto err_init_rx_buffers;
493 }
494
495 /* initalise counters */
496 rx_ring->cur_rx = 0;
497 rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
498 priv->dma_buf_sz = bfsize;
499
500 return 0;
501
502err_init_rx_buffers:
503 while (--desc_index >= 0)
504 free_rx_ring(priv->device, rx_ring, desc_index);
505 kfree(rx_ring->rx_skbuff);
506rxbuff_err:
507 kfree(rx_ring->rx_skbuff_dma);
508dmamem_err:
509 dma_free_coherent(priv->device,
510 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
511 rx_ring->dma_rx, rx_ring->dma_rx_phy);
512error:
513 return -ENOMEM;
514}
/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: device structure used for the original allocation
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: releases the coherent TX descriptor memory of one TX
 * queue (the skb arrays are devm-managed, see init_tx_ring()).
 */
void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
		  int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}
528
529/**
530 * init_dma_desc_rings - init the RX/TX descriptor rings
531 * @dev: net device structure
532 * Description: this function initializes the DMA RX/TX descriptors
533 * and allocates the socket buffers. It suppors the chained and ring
534 * modes.
535 */
536static int init_dma_desc_rings(struct net_device *netd)
537{
538 int queue_num, ret;
539 struct sxgbe_priv_data *priv = netdev_priv(netd);
540 int tx_rsize = priv->dma_tx_size;
541 int rx_rsize = priv->dma_rx_size;
542
543 /* Allocate memory for queue structures and TX descs */
544 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
545 ret = init_tx_ring(priv->device, queue_num,
546 priv->txq[queue_num], tx_rsize);
547 if (ret) {
548 dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
549 goto txalloc_err;
550 }
551
552 /* save private pointer in each ring this
553 * pointer is needed during cleaing TX queue
554 */
555 priv->txq[queue_num]->priv_ptr = priv;
556 }
557
558 /* Allocate memory for queue structures and RX descs */
559 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
560 ret = init_rx_ring(netd, queue_num,
561 priv->rxq[queue_num], rx_rsize);
562 if (ret) {
563 netdev_err(netd, "RX DMA ring allocation failed!!\n");
564 goto rxalloc_err;
565 }
566
567 /* save private pointer in each ring this
568 * pointer is needed during cleaing TX queue
569 */
570 priv->rxq[queue_num]->priv_ptr = priv;
571 }
572
573 sxgbe_clear_descriptors(priv);
574
575 return 0;
576
577txalloc_err:
578 while (queue_num--)
579 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
580 return ret;
581
582rxalloc_err:
583 while (queue_num--)
584 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
585 return ret;
586}
587
588static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
589{
590 int dma_desc;
591 struct sxgbe_priv_data *priv = txqueue->priv_ptr;
592 int tx_rsize = priv->dma_tx_size;
593
594 for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
595 struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
596
597 if (txqueue->tx_skbuff_dma[dma_desc])
598 dma_unmap_single(priv->device,
599 txqueue->tx_skbuff_dma[dma_desc],
600 priv->hw->desc->get_tx_len(tdesc),
601 DMA_TO_DEVICE);
602
603 dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
604 txqueue->tx_skbuff[dma_desc] = NULL;
605 txqueue->tx_skbuff_dma[dma_desc] = 0;
606 }
607}
608
609
610static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
611{
612 int queue_num;
613
614 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
615 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
616 tx_free_ring_skbufs(tqueue);
617 }
618}
619
620static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
621{
622 int queue_num;
623 int tx_rsize = priv->dma_tx_size;
624 int rx_rsize = priv->dma_rx_size;
625
626 /* Release the DMA TX buffers */
627 dma_free_tx_skbufs(priv);
628
629 /* Release the TX ring memory also */
630 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
631 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
632 }
633
634 /* Release the RX ring memory also */
635 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
636 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
637 }
638}
639
640static int txring_mem_alloc(struct sxgbe_priv_data *priv)
641{
642 int queue_num;
643
644 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
645 priv->txq[queue_num] = devm_kmalloc(priv->device,
646 sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
647 if (!priv->txq[queue_num])
648 return -ENOMEM;
649 }
650
651 return 0;
652}
653
654static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
655{
656 int queue_num;
657
658 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
659 priv->rxq[queue_num] = devm_kmalloc(priv->device,
660 sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
661 if (!priv->rxq[queue_num])
662 return -ENOMEM;
663 }
664
665 return 0;
666}
667
/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
 * or Store-And-Forward capability.
 */
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* threshold mode: program the current tx_tc/rx_tc values */
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		/* neither mode requested by the platform data */
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}
704
/**
 * sxgbe_tx_queue_clean:
 * @tqueue: TX queue to reclaim
 * Description: reclaims descriptors and skbs the DMA engine has finished
 * transmitting and wakes the netdev queue once enough descriptors are
 * free again.  Runs under the queue's tx_lock.
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	spin_lock(&tqueue->tx_lock);

	priv->xstats.tx_clean++;
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		/* cur_tx/dirty_tx are free-running; the ring slot is the
		 * counter modulo the ring size
		 */
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

	/* wake up queue */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		/* re-check under netif_tx_lock to close the race with xmit */
		if (netif_tx_queue_stopped(dev_txq) &&
		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_tx_wake_queue(dev_txq);
		}
		netif_tx_unlock(priv->dev);
	}

	spin_unlock(&tqueue->tx_lock);
}
770
771/**
772 * sxgbe_tx_clean:
773 * @priv: driver private structure
774 * Description: it reclaims resources after transmission completes.
775 */
776static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
777{
778 u8 queue_num;
779
780 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
781 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
782
783 sxgbe_tx_queue_clean(tqueue);
784 }
785
786 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
787 sxgbe_enable_eee_mode(priv);
788 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
789 }
790}
791
/**
 * sxgbe_restart_tx_queue: irq tx error mng function
 * @priv: driver private structure
 * @queue_num: queue to reset
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue so no new packets are submitted while we reset */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}
825
826/**
827 * sxgbe_reset_all_tx_queues: irq tx error mng function
828 * @priv: driver private structure
829 * Description: it cleans all the descriptors and
830 * restarts the transmission on all queues in case of errors.
831 */
832static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
833{
834 int queue_num;
835
836 /* On TX timeout of net device, resetting of all queues
837 * may not be proper way, revisit this later if needed
838 */
839 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
840 sxgbe_restart_tx_queue(priv, queue_num);
841}
842
/**
 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * Reads the three hardware capability registers and fills in the
 * hw_cap feature structure; a register that reads as zero leaves the
 * corresponding fields untouched.  Returns the raw value of the last
 * capability register read (CAP[2]).
 */
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read First Capability Register CAP[0] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read First Capability Register CAP[1] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		/* NOTE(review): atstmap_hword reuses the TX FIFO size
		 * macro - looks like a copy-paste; confirm the intended
		 * bit-field macro for this feature.
		 */
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read First Capability Register CAP[2] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}
902
903/**
904 * sxgbe_check_ether_addr: check if the MAC addr is valid
905 * @priv: driver private structure
906 * Description:
907 * it is to verify if the MAC address is valid, in case of failures it
908 * generates a random MAC address
909 */
910static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
911{
912 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
913 priv->hw->mac->get_umac_addr((void __iomem *)
914 priv->ioaddr,
915 priv->dev->dev_addr, 0);
916 if (!is_valid_ether_addr(priv->dev->dev_addr))
917 eth_hw_addr_random(priv->dev);
918 }
919 dev_info(priv->device, "device MAC address %pM\n",
920 priv->dev->dev_addr);
921}
922
923/**
924 * sxgbe_init_dma_engine: DMA init.
925 * @priv: driver private structure
926 * Description:
927 * It inits the DMA invoking the specific SXGBE callback.
928 * Some DMA parameters can be passed from the platform;
929 * in case of these are not passed a default is kept for the MAC or GMAC.
930 */
931static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
932{
933 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
934 int queue_num;
935
936 if (priv->plat->dma_cfg) {
937 pbl = priv->plat->dma_cfg->pbl;
938 fixed_burst = priv->plat->dma_cfg->fixed_burst;
939 burst_map = priv->plat->dma_cfg->burst_map;
940 }
941
942 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
943 priv->hw->dma->cha_init(priv->ioaddr, queue_num,
944 fixed_burst, pbl,
945 (priv->txq[queue_num])->dma_tx_phy,
946 (priv->rxq[queue_num])->dma_rx_phy,
947 priv->dma_tx_size, priv->dma_rx_size);
948
949 return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
950}
951
952/**
953 * sxgbe_init_mtl_engine: MTL init.
954 * @priv: driver private structure
955 * Description:
956 * It inits the MTL invoking the specific SXGBE callback.
957 */
958static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
959{
960 int queue_num;
961
962 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
963 priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
964 priv->hw_cap.tx_mtl_qsize);
965 priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
966 }
967}
968
969/**
970 * sxgbe_disable_mtl_engine: MTL disable.
971 * @priv: driver private structure
972 * Description:
973 * It disables the MTL queues by invoking the specific SXGBE callback.
974 */
975static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
976{
977 int queue_num;
978
979 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
980 priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
981}
982
983
/**
 * sxgbe_tx_timer: mitigation sw timer for tx.
 * @data: the TX queue, stashed in the timer's data field
 * Description:
 * This is the timer handler to directly invoke the sxgbe_tx_clean.
 */
static void sxgbe_tx_timer(unsigned long data)
{
	sxgbe_tx_queue_clean((struct sxgbe_tx_queue *)data);
}
995
996/**
997 * sxgbe_init_tx_coalesce: init tx mitigation options.
998 * @priv: driver private structure
999 * Description:
1000 * This inits the transmit coalesce parameters: i.e. timer rate,
1001 * timer handler and default threshold used for enabling the
1002 * interrupt on completion bit.
1003 */
1004static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
1005{
1006 u8 queue_num;
1007
1008 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1009 struct sxgbe_tx_queue *p = priv->txq[queue_num];
1010 p->tx_coal_frames = SXGBE_TX_FRAMES;
1011 p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
1012 init_timer(&p->txtimer);
1013 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
1014 p->txtimer.data = (unsigned long)&priv->txq[queue_num];
1015 p->txtimer.function = sxgbe_tx_timer;
1016 add_timer(&p->txtimer);
1017 }
1018}
1019
1020static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
1021{
1022 u8 queue_num;
1023
1024 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1025 struct sxgbe_tx_queue *p = priv->txq[queue_num];
1026 del_timer_sync(&p->txtimer);
1027 }
1028}
1029
1030/**
1031 * sxgbe_open - open entry point of the driver
1032 * @dev : pointer to the device structure.
1033 * Description:
1034 * This function is the open entry point of the driver.
1035 * Return value:
1036 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1037 * file on failure.
1038 */
1039static int sxgbe_open(struct net_device *dev)
1040{
1041 struct sxgbe_priv_data *priv = netdev_priv(dev);
1042 int ret, queue_num;
1043
1044 clk_prepare_enable(priv->sxgbe_clk);
1045
1046 sxgbe_check_ether_addr(priv);
1047
1048 /* Init the phy */
1049 ret = sxgbe_init_phy(dev);
1050 if (ret) {
1051 netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
1052 __func__, ret);
1053 goto phy_error;
1054 }
1055
1056 /* Create and initialize the TX/RX descriptors chains. */
1057 priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
1058 priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
1059 priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
1060 priv->tx_tc = TC_DEFAULT;
1061 priv->rx_tc = TC_DEFAULT;
1062 init_dma_desc_rings(dev);
1063
1064 /* DMA initialization and SW reset */
1065 ret = sxgbe_init_dma_engine(priv);
1066 if (ret < 0) {
1067 netdev_err(dev, "%s: DMA initialization failed\n", __func__);
1068 goto init_error;
1069 }
1070
1071 /* MTL initialization */
1072 sxgbe_init_mtl_engine(priv);
1073
1074 /* Copy the MAC addr into the HW */
1075 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
1076
1077 /* Initialize the MAC Core */
1078 priv->hw->mac->core_init(priv->ioaddr);
1079
1080 /* Request the IRQ lines */
1081 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
1082 IRQF_SHARED, dev->name, dev);
1083 if (unlikely(ret < 0)) {
1084 netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1085 __func__, priv->irq, ret);
1086 goto init_error;
1087 }
1088
1089 /* If the LPI irq is different from the mac irq
1090 * register a dedicated handler
1091 */
1092 if (priv->lpi_irq != dev->irq) {
1093 ret = devm_request_irq(priv->device, priv->lpi_irq,
1094 sxgbe_common_interrupt,
1095 IRQF_SHARED, dev->name, dev);
1096 if (unlikely(ret < 0)) {
1097 netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1098 __func__, priv->lpi_irq, ret);
1099 goto init_error;
1100 }
1101 }
1102
1103 /* Request TX DMA irq lines */
1104 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1105 ret = devm_request_irq(priv->device,
1106 (priv->txq[queue_num])->irq_no,
1107 sxgbe_tx_interrupt, 0,
1108 dev->name, priv->txq[queue_num]);
1109 if (unlikely(ret < 0)) {
1110 netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
1111 __func__, priv->irq, ret);
1112 goto init_error;
1113 }
1114 }
1115
1116 /* Request RX DMA irq lines */
1117 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1118 ret = devm_request_irq(priv->device,
1119 (priv->rxq[queue_num])->irq_no,
1120 sxgbe_rx_interrupt, 0,
1121 dev->name, priv->rxq[queue_num]);
1122 if (unlikely(ret < 0)) {
1123 netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
1124 __func__, priv->irq, ret);
1125 goto init_error;
1126 }
1127 }
1128
1129 /* Enable the MAC Rx/Tx */
1130 priv->hw->mac->enable_tx(priv->ioaddr, true);
1131 priv->hw->mac->enable_rx(priv->ioaddr, true);
1132
1133 /* Set the HW DMA mode and the COE */
1134 sxgbe_mtl_operation_mode(priv);
1135
1136 /* Extra statistics */
1137 memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
1138
1139 priv->xstats.tx_threshold = priv->tx_tc;
1140 priv->xstats.rx_threshold = priv->rx_tc;
1141
1142 /* Start the ball rolling... */
1143 netdev_dbg(dev, "DMA RX/TX processes started...\n");
1144 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1145 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1146
1147 if (priv->phydev)
1148 phy_start(priv->phydev);
1149
1150 /* initalise TX coalesce parameters */
1151 sxgbe_tx_init_coalesce(priv);
1152
1153 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1154 priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
1155 priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
1156 }
1157
1158 priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
1159 priv->eee_enabled = sxgbe_eee_init(priv);
1160
1161 napi_enable(&priv->napi);
1162 netif_start_queue(dev);
1163
1164 return 0;
1165
1166init_error:
1167 free_dma_desc_resources(priv);
1168 if (priv->phydev)
1169 phy_disconnect(priv->phydev);
1170phy_error:
1171 clk_disable_unprepare(priv->sxgbe_clk);
1172
1173 return ret;
1174}
1175
/**
 * sxgbe_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.  Teardown order matters:
 * stop the PHY and the stack first, then the DMA engines, and only then
 * free the descriptor resources and gate the MAC/clock.
 */
static int sxgbe_release(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	/* The EEE control timer only runs when EEE was initialised */
	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	/* Keep the stack from handing us any further frames */
	netif_tx_stop_all_queues(dev);

	napi_disable(&priv->napi);

	/* delete TX timers */
	sxgbe_tx_del_timer(priv);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	/* disable MTL queue */
	sxgbe_disable_mtl_engine(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	clk_disable_unprepare(priv->sxgbe_clk);

	return 0;
}
1221
/* Prepare first Tx descriptor for doing TSO operation.
 * Maps the L2..L4 headers of @skb and programs @first_desc for TCP
 * segmentation; the payload is mapped separately by the caller
 * (sxgbe_xmit fragment loop).
 */
void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
		       struct sxgbe_tx_norm_desc *first_desc,
		       struct sk_buff *skb)
{
	unsigned int total_hdr_len, tcp_hdr_len;

	/* Write first Tx descriptor with appropriate value */
	tcp_hdr_len = tcp_hdrlen(skb);
	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;

	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
					    total_hdr_len, DMA_TO_DEVICE);
	/* NOTE(review): on mapping failure this only logs and still
	 * programs the descriptor with the invalid handle; the function is
	 * void so callers cannot see the error - confirm whether a real
	 * error path is needed here.
	 */
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);

	first_desc->tdes23.tx_rd_des23.first_desc = 1;
	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
					   tcp_hdr_len,
					   skb->len - total_hdr_len);
}
1243
/**
 * sxgbe_xmit: Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	unsigned txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int tx_rsize = priv->dma_tx_size;
	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int no_pagedlen = skb_headlen(skb);
	int is_jumbo = 0;
	u16 cur_mss = skb_shinfo(skb)->gso_size;
	u32 ctxt_desc_req = 0;

	/* get the TX queue handle */
	dev_txq = netdev_get_tx_queue(dev, txq_index);

	/* A context descriptor is needed when the MSS changed since the
	 * last GSO frame on this queue...
	 */
	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
		ctxt_desc_req = 1;

	/* ...or when VLAN / HW TX timestamp info must be carried */
	if (unlikely(vlan_tx_tag_present(skb) ||
		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		      tqueue->hwts_tx_en)))
		ctxt_desc_req = 1;

	/* get the spinlock */
	spin_lock(&tqueue->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		sxgbe_disable_eee_mode(priv);

	/* Not enough free descriptors for this frame: stop the queue and
	 * ask the stack to retry later.
	 */
	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
				   __func__, txq_index);
		}
		/* release the spin lock in case of BUSY */
		spin_unlock(&tqueue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tqueue->cur_tx % tx_rsize;
	tx_desc = tqueue->dma_tx + entry;

	first_desc = tx_desc;
	if (ctxt_desc_req)
		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;

	/* save the skb address */
	tqueue->tx_skbuff[entry] = skb;

	if (!is_jumbo) {
		if (likely(skb_is_gso(skb))) {
			/* TSO support: emit a context descriptor carrying
			 * the new MSS, then advance to the next slot for
			 * the first data descriptor.
			 */
			if (unlikely(tqueue->prev_mss != cur_mss)) {
				priv->hw->desc->tx_ctxt_desc_set_mss(
						ctxt_desc, cur_mss);
				priv->hw->desc->tx_ctxt_desc_set_tcmssv(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_reset_ostc(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_ctxt(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_owner(
						ctxt_desc);

				entry = (++tqueue->cur_tx) % tx_rsize;
				first_desc = tqueue->dma_tx + entry;

				tqueue->prev_mss = cur_mss;
			}
			sxgbe_tso_prepare(priv, first_desc, skb);
		} else {
			/* Non-GSO: map the linear part into the first
			 * descriptor.
			 */
			tx_desc->tdes01 = dma_map_single(priv->device,
							 skb->data, no_pagedlen, DMA_TO_DEVICE);
			if (dma_mapping_error(priv->device, tx_desc->tdes01))
				netdev_err(dev, "%s: TX dma mapping failed!!\n",
					   __func__);

			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
							no_pagedlen, cksum_flag);
		}
	}

	/* One descriptor per paged fragment; ownership is handed to the
	 * hardware for each of them (the first descriptor's owner bit is
	 * set last, below, so the DMA cannot start mid-frame).
	 */
	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
		int len = skb_frag_size(frag);

		entry = (++tqueue->cur_tx) % tx_rsize;
		tx_desc = tqueue->dma_tx + entry;
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);

		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
		tqueue->tx_skbuff[entry] = NULL;

		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		wmb();

		/* set the owner */
		priv->hw->desc->set_tx_owner(tx_desc);
	}

	/* close the descriptors */
	priv->hw->desc->close_tx_desc(tx_desc);

	/* memory barrier to flush descriptor */
	wmb();

	/* Interrupt mitigation: only every tx_coal_frames frames raise a
	 * completion interrupt; otherwise rely on the sw timer.
	 * NOTE(review): this branch clears IC when the counter EXCEEDS the
	 * threshold, which is the opposite of the usual stmmac-style
	 * pattern - confirm the intended coalescing behaviour.
	 */
	tqueue->tx_count_frames += nr_frags + 1;
	if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
		priv->hw->desc->clear_tx_ic(tx_desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&tqueue->txtimer,
			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
	} else {
		tqueue->tx_count_frames = 0;
	}

	/* set owner for first desc */
	priv->hw->desc->set_tx_owner(first_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->cur_tx++;

	/* display current ring */
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
		  __func__, tqueue->cur_tx % tx_rsize,
		  tqueue->dirty_tx % tx_rsize, entry,
		  first_desc, nr_frags);

	/* Stop the queue while a max-fragment frame may no longer fit */
	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     tqueue->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->tx_enable_tstamp(first_desc);
	}

	/* sw timestamp only when hw timestamping is off for this queue */
	if (!tqueue->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);

	spin_unlock(&tqueue->tx_lock);

	return NETDEV_TX_OK;
}
1416
/**
 * sxgbe_rx_refill: refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.  Walks the ring from dirty_rx up to cur_rx
 * on the current RX queue, allocating a fresh skb for each consumed slot
 * and handing the descriptor back to the hardware.
 */
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	u8 qnum = priv->cur_rx_qnum;

	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
	     priv->rxq[qnum]->dirty_rx++) {
		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
		struct sxgbe_rx_norm_desc *p;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			/* Out of memory: stop refilling, the slot stays
			 * empty until the next refill pass.
			 */
			if (unlikely(skb == NULL))
				break;

			priv->rxq[qnum]->rx_skbuff[entry] = skb;
			/* NOTE(review): dma_map_single() result is not
			 * checked with dma_mapping_error() here - confirm
			 * whether that is acceptable on this platform.
			 */
			priv->rxq[qnum]->rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize,
					       DMA_FROM_DEVICE);

			p->rdes23.rx_rd_des23.buf2_addr =
				priv->rxq[qnum]->rx_skbuff_dma[entry];
		}

		/* Added memory barrier for RX descriptor modification */
		wmb();
		/* Hand the descriptor back to the DMA only after the buffer
		 * address is visible.
		 */
		priv->hw->desc->set_rx_owner(p);
		/* Added memory barrier for RX descriptor modification */
		wmb();
	}
}
1460
1461/**
1462 * sxgbe_rx: receive the frames from the remote host
1463 * @priv: driver private structure
1464 * @limit: napi bugget.
1465 * Description : this the function called by the napi poll method.
1466 * It gets all the frames inside the ring.
1467 */
1468static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
1469{
1470 u8 qnum = priv->cur_rx_qnum;
1471 unsigned int rxsize = priv->dma_rx_size;
1472 unsigned int entry = priv->rxq[qnum]->cur_rx;
1473 unsigned int next_entry = 0;
1474 unsigned int count = 0;
1475 int checksum;
1476 int status;
1477
1478 while (count < limit) {
1479 struct sxgbe_rx_norm_desc *p;
1480 struct sk_buff *skb;
1481 int frame_len;
1482
1483 p = priv->rxq[qnum]->dma_rx + entry;
1484
1485 if (priv->hw->desc->get_rx_owner(p))
1486 break;
1487
1488 count++;
1489
1490 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
1491 prefetch(priv->rxq[qnum]->dma_rx + next_entry);
1492
1493 /* Read the status of the incoming frame and also get checksum
1494 * value based on whether it is enabled in SXGBE hardware or
1495 * not.
1496 */
1497 status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
1498 &checksum);
1499 if (unlikely(status < 0)) {
1500 entry = next_entry;
1501 continue;
1502 }
1503 if (unlikely(!priv->rxcsum_insertion))
1504 checksum = CHECKSUM_NONE;
1505
1506 skb = priv->rxq[qnum]->rx_skbuff[entry];
1507
1508 if (unlikely(!skb))
1509 netdev_err(priv->dev, "rx descriptor is not consistent\n");
1510
1511 prefetch(skb->data - NET_IP_ALIGN);
1512 priv->rxq[qnum]->rx_skbuff[entry] = NULL;
1513
1514 frame_len = priv->hw->desc->get_rx_frame_len(p);
1515
1516 skb_put(skb, frame_len);
1517
1518 skb->ip_summed = checksum;
1519 if (checksum == CHECKSUM_NONE)
1520 netif_receive_skb(skb);
1521 else
1522 napi_gro_receive(&priv->napi, skb);
1523
1524 entry = next_entry;
1525 }
1526
1527 sxgbe_rx_refill(priv);
1528
1529 return count;
1530}
1531
1532/**
1533 * sxgbe_poll - sxgbe poll method (NAPI)
1534 * @napi : pointer to the napi structure.
1535 * @budget : maximum number of packets that the current CPU can receive from
1536 * all interfaces.
1537 * Description :
1538 * To look at the incoming frames and clear the tx resources.
1539 */
1540static int sxgbe_poll(struct napi_struct *napi, int budget)
1541{
1542 struct sxgbe_priv_data *priv = container_of(napi,
1543 struct sxgbe_priv_data, napi);
1544 int work_done = 0;
1545 u8 qnum = priv->cur_rx_qnum;
1546
1547 priv->xstats.napi_poll++;
1548 /* first, clean the tx queues */
1549 sxgbe_tx_all_clean(priv);
1550
1551 work_done = sxgbe_rx(priv, budget);
1552 if (work_done < budget) {
1553 napi_complete(napi);
1554 priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
1555 }
1556
1557 return work_done;
1558}
1559
/**
 * sxgbe_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void sxgbe_tx_timeout(struct net_device *dev)
{
	sxgbe_reset_all_tx_queues(netdev_priv(dev));
}
1574
1575/**
1576 * sxgbe_common_interrupt - main ISR
1577 * @irq: interrupt number.
1578 * @dev_id: to pass the net device pointer.
1579 * Description: this is the main driver interrupt service routine.
1580 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
1581 * interrupts.
1582 */
1583static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
1584{
1585 struct net_device *netdev = (struct net_device *)dev_id;
1586 struct sxgbe_priv_data *priv = netdev_priv(netdev);
1587 int status;
1588
1589 status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
1590 /* For LPI we need to save the tx status */
1591 if (status & TX_ENTRY_LPI_MODE) {
1592 priv->xstats.tx_lpi_entry_n++;
1593 priv->tx_path_in_lpi_mode = true;
1594 }
1595 if (status & TX_EXIT_LPI_MODE) {
1596 priv->xstats.tx_lpi_exit_n++;
1597 priv->tx_path_in_lpi_mode = false;
1598 }
1599 if (status & RX_ENTRY_LPI_MODE)
1600 priv->xstats.rx_lpi_entry_n++;
1601 if (status & RX_EXIT_LPI_MODE)
1602 priv->xstats.rx_lpi_exit_n++;
1603
1604 return IRQ_HANDLED;
1605}
1606
1607/**
1608 * sxgbe_tx_interrupt - TX DMA ISR
1609 * @irq: interrupt number.
1610 * @dev_id: to pass the net device pointer.
1611 * Description: this is the tx dma interrupt service routine.
1612 */
1613static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
1614{
1615 int status;
1616 struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
1617 struct sxgbe_priv_data *priv = txq->priv_ptr;
1618
1619 /* get the channel status */
1620 status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
1621 &priv->xstats);
1622 /* check for normal path */
1623 if (likely((status & handle_tx)))
1624 napi_schedule(&priv->napi);
1625
1626 /* check for unrecoverable error */
1627 if (unlikely((status & tx_hard_error)))
1628 sxgbe_restart_tx_queue(priv, txq->queue_no);
1629
1630 /* check for TC configuration change */
1631 if (unlikely((status & tx_bump_tc) &&
1632 (priv->tx_tc != SXGBE_MTL_SFMODE) &&
1633 (priv->tx_tc < 512))) {
1634 /* step of TX TC is 32 till 128, otherwise 64 */
1635 priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
1636 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
1637 txq->queue_no, priv->tx_tc);
1638 priv->xstats.tx_threshold = priv->tx_tc;
1639 }
1640
1641 return IRQ_HANDLED;
1642}
1643
1644/**
1645 * sxgbe_rx_interrupt - RX DMA ISR
1646 * @irq: interrupt number.
1647 * @dev_id: to pass the net device pointer.
1648 * Description: this is the rx dma interrupt service routine.
1649 */
1650static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
1651{
1652 int status;
1653 struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
1654 struct sxgbe_priv_data *priv = rxq->priv_ptr;
1655
1656 /* get the channel status */
1657 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
1658 &priv->xstats);
1659
1660 if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
1661 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
1662 __napi_schedule(&priv->napi);
1663 }
1664
1665 /* check for TC configuration change */
1666 if (unlikely((status & rx_bump_tc) &&
1667 (priv->rx_tc != SXGBE_MTL_SFMODE) &&
1668 (priv->rx_tc < 128))) {
1669 /* step of TC is 32 */
1670 priv->rx_tc += 32;
1671 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
1672 rxq->queue_no, priv->rx_tc);
1673 priv->xstats.rx_threshold = priv->rx_tc;
1674 }
1675
1676 return IRQ_HANDLED;
1677}
1678
1679static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
1680{
1681 u64 val = readl(ioaddr + reg_lo);
1682
1683 val |= ((u64)readl(ioaddr + reg_hi)) << 32;
1684
1685 return val;
1686}
1687
1688
/* sxgbe_get_stats64 - entry point to see statistical information of device
 * @dev : device pointer.
 * @stats : pointer to hold all the statistical information of device.
 * Description:
 * This function is a driver entry point whenever ifconfig command gets
 * executed to see device statistics. Statistics are number of
 * bytes sent or received, errors occured etc.  The MMC counters are
 * frozen for the duration of the reads and unfrozen at the end.
 * Return value:
 * This function returns various statistical information of device.
 */
static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *stats)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->ioaddr;
	u64 count;

	spin_lock(&priv->stats_lock);
	/* Freeze the counter registers before reading value otherwise it may
	 * get updated by hardware while we are reading them
	 */
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);

	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
					   SXGBE_MMC_RXOCTETHI_GCNT_REG);

	stats->rx_packets = sxgbe_get_stat64(ioaddr,
					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);

	stats->multicast = sxgbe_get_stat64(ioaddr,
					    SXGBE_MMC_RXMULTILO_GCNT_REG,
					    SXGBE_MMC_RXMULTIHI_GCNT_REG);

	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
						SXGBE_MMC_RXCRCERRLO_REG,
						SXGBE_MMC_RXCRCERRHI_REG);

	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
						  SXGBE_MMC_RXLENERRLO_REG,
						  SXGBE_MMC_RXLENERRHI_REG);

	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);

	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
					   SXGBE_MMC_TXOCTETHI_GCNT_REG);

	/* count = good + bad frames; the GCNT pair below is good-only, so
	 * tx_errors is derived as (good+bad) - good.
	 */
	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);

	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
	stats->tx_errors = count - stats->tx_errors;
	stats->tx_packets = count;
	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
	/* Unfreeze the counters again */
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);

	return stats;
}
1754
1755/* sxgbe_set_features - entry point to set offload features of the device.
1756 * @dev : device pointer.
1757 * @features : features which are required to be set.
1758 * Description:
1759 * This function is a driver entry point and called by Linux kernel whenever
1760 * any device features are set or reset by user.
1761 * Return value:
1762 * This function returns 0 after setting or resetting device features.
1763 */
1764static int sxgbe_set_features(struct net_device *dev,
1765 netdev_features_t features)
1766{
1767 struct sxgbe_priv_data *priv = netdev_priv(dev);
1768 netdev_features_t changed = dev->features ^ features;
1769
1770 if (changed & NETIF_F_RXCSUM) {
1771 if (features & NETIF_F_RXCSUM) {
1772 priv->hw->mac->enable_rx_csum(priv->ioaddr);
1773 priv->rxcsum_insertion = true;
1774 } else {
1775 priv->hw->mac->disable_rx_csum(priv->ioaddr);
1776 priv->rxcsum_insertion = false;
1777 }
1778 }
1779
1780 return 0;
1781}
1782
1783/* sxgbe_change_mtu - entry point to change MTU size for the device.
1784 * @dev : device pointer.
1785 * @new_mtu : the new MTU size for the device.
1786 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1787 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1788 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1789 * Return value:
1790 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1791 * file on failure.
1792 */
1793static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
1794{
1795 /* RFC 791, page 25, "Every internet module must be able to forward
1796 * a datagram of 68 octets without further fragmentation."
1797 */
1798 if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
1799 netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
1800 MIN_MTU, MAX_MTU);
1801 return -EINVAL;
1802 }
1803
1804 /* Return if the buffer sizes will not change */
1805 if (dev->mtu == new_mtu)
1806 return 0;
1807
1808 dev->mtu = new_mtu;
1809
1810 if (!netif_running(dev))
1811 return 0;
1812
1813 /* Recevice ring buffer size is needed to be set based on MTU. If MTU is
1814 * changed then reinitilisation of the receive ring buffers need to be
1815 * done. Hence bring interface down and bring interface back up
1816 */
1817 sxgbe_release(dev);
1818 return sxgbe_open(dev);
1819}
1820
1821static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
1822 unsigned int reg_n)
1823{
1824 unsigned long data;
1825
1826 data = (addr[5] << 8) | addr[4];
1827 /* For MAC Addr registers se have to set the Address Enable (AE)
1828 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
1829 * is RO.
1830 */
1831 writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
1832 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
1833 writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
1834}
1835
/**
 * sxgbe_set_rx_mode - entry point for setting different receive mode of
 * a device. unicast, multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever different receive mode like unicast, multicast and promiscuous
 * must be enabled/disabled.
 * Return value:
 * void.
 */
static void sxgbe_set_rx_mode(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
	unsigned int value = 0;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;
	/* Perfect-filter slot 0 holds the primary MAC, so extra unicast
	 * addresses start at register 1.
	 */
	int reg = 1;

	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: pass everything, hash tables irrelevant */
		value = SXGBE_FRAME_FILTER_PR;

	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
		   (dev->flags & IFF_ALLMULTI)) {
		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);

	} else if (!netdev_mc_empty(dev)) {
		/* Hash filter for multicast */
		value = SXGBE_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		 * are required
		 */
		value |= SXGBE_FRAME_FILTER_PR;
	else {
		netdev_for_each_uc_addr(ha, dev) {
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}
#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= SXGBE_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + SXGBE_FRAME_FILTER);

	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
		   readl(ioaddr + SXGBE_FRAME_FILTER),
		   readl(ioaddr + SXGBE_HASH_HIGH),
		   readl(ioaddr + SXGBE_HASH_LOW));
}
1912
1913/**
1914 * sxgbe_config - entry point for changing configuration mode passed on by
1915 * ifconfig
1916 * @dev : pointer to the device structure
1917 * @map : pointer to the device mapping structure
1918 * Description:
1919 * This function is a driver entry point which gets called by the kernel
1920 * whenever some device configuration is changed.
1921 * Return value:
1922 * This function returns 0 if success and appropriate error otherwise.
1923 */
1924static int sxgbe_config(struct net_device *dev, struct ifmap *map)
1925{
1926 struct sxgbe_priv_data *priv = netdev_priv(dev);
1927
1928 /* Can't act on a running interface */
1929 if (dev->flags & IFF_UP)
1930 return -EBUSY;
1931
1932 /* Don't allow changing the I/O address */
1933 if (map->base_addr != (unsigned long)priv->ioaddr) {
1934 netdev_warn(dev, "can't change I/O address\n");
1935 return -EOPNOTSUPP;
1936 }
1937
1938 /* Don't allow changing the IRQ */
1939 if (map->irq != priv->irq) {
1940 netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
1941 return -EOPNOTSUPP;
1942 }
1943
1944 return 0;
1945}
1946
1947#ifdef CONFIG_NET_POLL_CONTROLLER
1948/**
1949 * sxgbe_poll_controller - entry point for polling receive by device
1950 * @dev : pointer to the device structure
1951 * Description:
1952 * This function is used by NETCONSOLE and other diagnostic tools
1953 * to allow network I/O with interrupts disabled.
1954 * Return value:
1955 * Void.
1956 */
1957static void sxgbe_poll_controller(struct net_device *dev)
1958{
1959 struct sxgbe_priv_data *priv = netdev_priv(dev);
1960
1961 disable_irq(priv->irq);
1962 sxgbe_rx_interrupt(priv->irq, dev);
1963 enable_irq(priv->irq);
1964}
1965#endif
1966
1967/* sxgbe_ioctl - Entry point for the Ioctl
1968 * @dev: Device pointer.
1969 * @rq: An IOCTL specefic structure, that can contain a pointer to
1970 * a proprietary structure used to pass information to the driver.
1971 * @cmd: IOCTL command
1972 * Description:
1973 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
1974 */
1975static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1976{
1977 struct sxgbe_priv_data *priv = netdev_priv(dev);
1978 int ret = -EOPNOTSUPP;
1979
1980 if (!netif_running(dev))
1981 return -EINVAL;
1982
1983 switch (cmd) {
1984 case SIOCGMIIPHY:
1985 case SIOCGMIIREG:
1986 case SIOCSMIIREG:
1987 if (!priv->phydev)
1988 return -EINVAL;
1989 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1990 break;
1991 default:
1992 break;
1993 }
1994
1995 return ret;
1996}
1997
/* net_device callbacks implemented by this driver; hooked up in
 * sxgbe_drv_probe() via ndev->netdev_ops.
 */
static const struct net_device_ops sxgbe_netdev_ops = {
	.ndo_open = sxgbe_open,
	.ndo_start_xmit = sxgbe_xmit,
	.ndo_stop = sxgbe_release,
	.ndo_get_stats64 = sxgbe_get_stats64,
	.ndo_change_mtu = sxgbe_change_mtu,
	.ndo_set_features = sxgbe_set_features,
	.ndo_set_rx_mode = sxgbe_set_rx_mode,
	.ndo_tx_timeout = sxgbe_tx_timeout,
	.ndo_do_ioctl = sxgbe_ioctl,
	.ndo_set_config = sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = sxgbe_poll_controller,
#endif
	/* generic helper: validates and copies the new MAC address */
	.ndo_set_mac_address = eth_mac_addr,
};
2014
/* Get the hardware ops: fill @ops_ptr with the per-subsystem operation
 * tables (MAC core, descriptors, DMA, MTL) and the default link settings.
 */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
	ops_ptr->mac = sxgbe_get_core_ops();
	ops_ptr->desc = sxgbe_get_desc_ops();
	ops_ptr->dma = sxgbe_get_dma_ops();
	ops_ptr->mtl = sxgbe_get_mtl_ops();

	/* set the MDIO communication Address/Data registers */
	ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
	ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;

	/* Assigning the default link settings
	 * no SXGBE defined default values to be set in registers,
	 * so assigning as 0 for port and duplex
	 */
	ops_ptr->link.port = 0;
	ops_ptr->link.duplex = 0;
	ops_ptr->link.speed = SXGBE_SPEED_10G;
}
2035
2036/**
2037 * sxgbe_hw_init - Init the GMAC device
2038 * @priv: driver private structure
2039 * Description: this function checks the HW capability
2040 * (if supported) and sets the driver's features.
2041 */
2042static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
2043{
2044 u32 ctrl_ids;
2045
2046 priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
2047 if(!priv->hw)
2048 return -ENOMEM;
2049
2050 /* get the hardware ops */
2051 sxgbe_get_ops(priv->hw);
2052
2053 /* get the controller id */
2054 ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
2055 priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
2056 priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
2057 pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
2058 priv->hw->ctrl_uid, priv->hw->ctrl_id);
2059
2060 /* get the H/W features */
2061 if (!sxgbe_get_hw_features(priv))
2062 pr_info("Hardware features not found\n");
2063
2064 if (priv->hw_cap.tx_csum_offload)
2065 pr_info("TX Checksum offload supported\n");
2066
2067 if (priv->hw_cap.rx_csum_offload)
2068 pr_info("RX Checksum offload supported\n");
2069
2070 return 0;
2071}
2072
2073/**
2074 * sxgbe_drv_probe
2075 * @device: device pointer
2076 * @plat_dat: platform data pointer
2077 * @addr: iobase memory address
2078 * Description: this is the main probe function used to
2079 * call the alloc_etherdev, allocate the priv structure.
2080 */
2081struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
2082 struct sxgbe_plat_data *plat_dat,
2083 void __iomem *addr)
2084{
2085 struct sxgbe_priv_data *priv;
2086 struct net_device *ndev;
2087 int ret;
2088 u8 queue_num;
2089
2090 ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
2091 SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
2092 if (!ndev)
2093 return NULL;
2094
2095 SET_NETDEV_DEV(ndev, device);
2096
2097 priv = netdev_priv(ndev);
2098 priv->device = device;
2099 priv->dev = ndev;
2100
2101 sxgbe_set_ethtool_ops(ndev);
2102 priv->plat = plat_dat;
2103 priv->ioaddr = addr;
2104
2105 /* Verify driver arguments */
2106 sxgbe_verify_args();
2107
2108 /* Init MAC and get the capabilities */
2109 ret = sxgbe_hw_init(priv);
2110 if (ret)
2111 goto error_free_netdev;
2112
2113 /* allocate memory resources for Descriptor rings */
2114 ret = txring_mem_alloc(priv);
2115 if (ret)
2116 goto error_free_netdev;
2117
2118 ret = rxring_mem_alloc(priv);
2119 if (ret)
2120 goto error_free_netdev;
2121
2122 ndev->netdev_ops = &sxgbe_netdev_ops;
2123
2124 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2125 NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
2126 NETIF_F_GRO;
2127 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2128 ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
2129
2130 /* assign filtering support */
2131 ndev->priv_flags |= IFF_UNICAST_FLT;
2132
2133 priv->msg_enable = netif_msg_init(debug, default_msg_level);
2134
2135 /* Enable TCP segmentation offload for all DMA channels */
2136 if (priv->hw_cap.tcpseg_offload) {
2137 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
2138 priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
2139 }
2140 }
2141
2142 /* Enable Rx checksum offload */
2143 if (priv->hw_cap.rx_csum_offload) {
2144 priv->hw->mac->enable_rx_csum(priv->ioaddr);
2145 priv->rxcsum_insertion = true;
2146 }
2147
2148 /* Initialise pause frame settings */
2149 priv->rx_pause = 1;
2150 priv->tx_pause = 1;
2151
2152 /* Rx Watchdog is available, enable depend on platform data */
2153 if (!priv->plat->riwt_off) {
2154 priv->use_riwt = 1;
2155 pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
2156 }
2157
2158 netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
2159
2160 spin_lock_init(&priv->stats_lock);
2161
2162 priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
2163 if (IS_ERR(priv->sxgbe_clk)) {
2164 netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
2165 __func__);
2166 goto error_clk_get;
2167 }
2168
2169 /* If a specific clk_csr value is passed from the platform
2170 * this means that the CSR Clock Range selection cannot be
2171 * changed at run-time and it is fixed. Viceversa the driver'll try to
2172 * set the MDC clock dynamically according to the csr actual
2173 * clock input.
2174 */
2175 if (!priv->plat->clk_csr)
2176 sxgbe_clk_csr_set(priv);
2177 else
2178 priv->clk_csr = priv->plat->clk_csr;
2179
2180 /* MDIO bus Registration */
2181 ret = sxgbe_mdio_register(ndev);
2182 if (ret < 0) {
2183 netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
2184 __func__, priv->plat->bus_id);
2185 goto error_mdio_register;
2186 }
2187
2188 ret = register_netdev(ndev);
2189 if (ret) {
2190 pr_err("%s: ERROR %i registering the device\n", __func__, ret);
2191 goto error_netdev_register;
2192 }
2193
2194 sxgbe_check_ether_addr(priv);
2195
2196 return priv;
2197
2198error_mdio_register:
2199 clk_put(priv->sxgbe_clk);
2200error_clk_get:
2201error_netdev_register:
2202 netif_napi_del(&priv->napi);
2203error_free_netdev:
2204 free_netdev(ndev);
2205
2206 return NULL;
2207}
2208
/**
 * sxgbe_drv_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 * Return value: always 0.
 */
int sxgbe_drv_remove(struct net_device *ndev)
{
	struct sxgbe_priv_data *priv = netdev_priv(ndev);

	netdev_info(ndev, "%s: removing driver\n", __func__);

	/* Stop DMA on all RX/TX queues before turning the MAC off */
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);

	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	netif_napi_del(&priv->napi);

	sxgbe_mdio_unregister(ndev);

	/* NOTE(review): unregister_netdev() runs only after the MDIO bus
	 * and NAPI context are torn down; most drivers unregister the
	 * netdev first — confirm this teardown ordering is intentional.
	 */
	unregister_netdev(ndev);

	free_netdev(ndev);

	return 0;
}
2237
2238#ifdef CONFIG_PM
/* Power-management hooks are currently stubs: suspend/resume report
 * success without touching the hardware, while freeze/restore are not
 * implemented and return -ENOSYS.
 */
int sxgbe_suspend(struct net_device *ndev)
{
	return 0;
}

int sxgbe_resume(struct net_device *ndev)
{
	return 0;
}

int sxgbe_freeze(struct net_device *ndev)
{
	return -ENOSYS;
}

int sxgbe_restore(struct net_device *ndev)
{
	return -ENOSYS;
}
2258#endif /* CONFIG_PM */
2259
2260/* Driver is configured as Platform driver */
2261static int __init sxgbe_init(void)
2262{
2263 int ret;
2264
2265 ret = sxgbe_register_platform();
2266 if (ret)
2267 goto err;
2268 return 0;
2269err:
2270 pr_err("driver registration failed\n");
2271 return ret;
2272}
2273
/* Module unload: unregister the platform driver. */
static void __exit sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}
2278
2279module_init(sxgbe_init);
2280module_exit(sxgbe_exit);
2281
2282#ifndef MODULE
2283static int __init sxgbe_cmdline_opt(char *str)
2284{
2285 char *opt;
2286
2287 if (!str || !*str)
2288 return -EINVAL;
2289 while ((opt = strsep(&str, ",")) != NULL) {
2290 if (!strncmp(opt, "eee_timer:", 6)) {
2291 if (kstrtoint(opt + 10, 0, &eee_timer))
2292 goto err;
2293 }
2294 }
2295 return 0;
2296
2297err:
2298 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
2299 return -EINVAL;
2300}
2301
2302__setup("sxgbeeth=", sxgbe_cmdline_opt);
2303#endif /* MODULE */
2304
2305
2306
2307MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
2308
2309MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2310MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
2311
2312MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
2313MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
2314MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
2315MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
2316
2317MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644
index 000000000000..01af2cbb479d
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -0,0 +1,244 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/io.h>
16#include <linux/mii.h>
17#include <linux/netdevice.h>
18#include <linux/platform_device.h>
19#include <linux/phy.h>
20#include <linux/slab.h>
21#include <linux/sxgbe_platform.h>
22
23#include "sxgbe_common.h"
24#include "sxgbe_reg.h"
25
26#define SXGBE_SMA_WRITE_CMD 0x01 /* write command */
27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */
28#define SXGBE_SMA_READ_CMD 0x03 /* read command */
29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
30#define SXGBE_MII_BUSY 0x00800000 /* mii busy */
31
32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
33{
34 unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
35
36 while (!time_after(jiffies, fin_time)) {
37 if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
38 return 0;
39 cpu_relax();
40 }
41
42 return -EBUSY;
43}
44
45static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
46 u16 phydata)
47{
48 u32 reg = phydata;
49
50 reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
51 ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
52 writel(reg, sp->ioaddr + sp->hw->mii.data);
53}
54
55static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
56 int phyreg, u16 phydata)
57{
58 u32 reg;
59
60 /* set mdio address register */
61 reg = ((phyreg >> 16) & 0x1f) << 21;
62 reg |= (phyaddr << 16) | (phyreg & 0xffff);
63 writel(reg, sp->ioaddr + sp->hw->mii.addr);
64
65 sxgbe_mdio_ctrl_data(sp, cmd, phydata);
66}
67
68static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
69 int phyreg, u16 phydata)
70{
71 u32 reg;
72
73 writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
74
75 /* set mdio address register */
76 reg = (phyaddr << 16) | (phyreg & 0x1f);
77 writel(reg, sp->ioaddr + sp->hw->mii.addr);
78
79 sxgbe_mdio_ctrl_data(sp, cmd, phydata);
80}
81
82static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
83 int phyreg, u16 phydata)
84{
85 const struct mii_regs *mii = &sp->hw->mii;
86 int rc;
87
88 rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
89 if (rc < 0)
90 return rc;
91
92 if (phyreg & MII_ADDR_C45) {
93 sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
94 } else {
95 /* Ports 0-3 only support C22. */
96 if (phyaddr >= 4)
97 return -ENODEV;
98
99 sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
100 }
101
102 return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
103}
104
105/**
106 * sxgbe_mdio_read
107 * @bus: points to the mii_bus structure
108 * @phyaddr: address of phy port
109 * @phyreg: address of register with in phy register
110 * Description: this function used for C45 and C22 MDIO Read
111 */
112static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
113{
114 struct net_device *ndev = bus->priv;
115 struct sxgbe_priv_data *priv = netdev_priv(ndev);
116 int rc;
117
118 rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
119 if (rc < 0)
120 return rc;
121
122 return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
123}
124
125/**
126 * sxgbe_mdio_write
127 * @bus: points to the mii_bus structure
128 * @phyaddr: address of phy port
129 * @phyreg: address of phy registers
130 * @phydata: data to be written into phy register
131 * Description: this function is used for C45 and C22 MDIO write
132 */
133static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
134 u16 phydata)
135{
136 struct net_device *ndev = bus->priv;
137 struct sxgbe_priv_data *priv = netdev_priv(ndev);
138
139 return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
140 phydata);
141}
142
143int sxgbe_mdio_register(struct net_device *ndev)
144{
145 struct mii_bus *mdio_bus;
146 struct sxgbe_priv_data *priv = netdev_priv(ndev);
147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
148 int err, phy_addr;
149 int *irqlist;
150 bool act;
151
152 /* allocate the new mdio bus */
153 mdio_bus = mdiobus_alloc();
154 if (!mdio_bus) {
155 netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
156 return -ENOMEM;
157 }
158
159 if (mdio_data->irqs)
160 irqlist = mdio_data->irqs;
161 else
162 irqlist = priv->mii_irq;
163
164 /* assign mii bus fields */
165 mdio_bus->name = "samsxgbe";
166 mdio_bus->read = &sxgbe_mdio_read;
167 mdio_bus->write = &sxgbe_mdio_write;
168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
169 mdio_bus->name, priv->plat->bus_id);
170 mdio_bus->priv = ndev;
171 mdio_bus->phy_mask = mdio_data->phy_mask;
172 mdio_bus->parent = priv->device;
173
174 /* register with kernel subsystem */
175 err = mdiobus_register(mdio_bus);
176 if (err != 0) {
177 netdev_err(ndev, "mdiobus register failed\n");
178 goto mdiobus_err;
179 }
180
181 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
182 struct phy_device *phy = mdio_bus->phy_map[phy_addr];
183
184 if (phy) {
185 char irq_num[4];
186 char *irq_str;
187 /* If an IRQ was provided to be assigned after
188 * the bus probe, do it here.
189 */
190 if ((mdio_data->irqs == NULL) &&
191 (mdio_data->probed_phy_irq > 0)) {
192 irqlist[phy_addr] = mdio_data->probed_phy_irq;
193 phy->irq = mdio_data->probed_phy_irq;
194 }
195
196 /* If we're going to bind the MAC to this PHY bus,
197 * and no PHY number was provided to the MAC,
198 * use the one probed here.
199 */
200 if (priv->plat->phy_addr == -1)
201 priv->plat->phy_addr = phy_addr;
202
203 act = (priv->plat->phy_addr == phy_addr);
204 switch (phy->irq) {
205 case PHY_POLL:
206 irq_str = "POLL";
207 break;
208 case PHY_IGNORE_INTERRUPT:
209 irq_str = "IGNORE";
210 break;
211 default:
212 sprintf(irq_num, "%d", phy->irq);
213 irq_str = irq_num;
214 break;
215 }
216 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
217 phy->phy_id, phy_addr, irq_str,
218 dev_name(&phy->dev), act ? " active" : "");
219 }
220 }
221
222 priv->mii = mdio_bus;
223
224 return 0;
225
226mdiobus_err:
227 mdiobus_free(mdio_bus);
228 return err;
229}
230
231int sxgbe_mdio_unregister(struct net_device *ndev)
232{
233 struct sxgbe_priv_data *priv = netdev_priv(ndev);
234
235 if (!priv->mii)
236 return 0;
237
238 mdiobus_unregister(priv->mii);
239 priv->mii->priv = NULL;
240 mdiobus_free(priv->mii);
241 priv->mii = NULL;
242
243 return 0;
244}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644
index 000000000000..324681c2bb74
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -0,0 +1,254 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/io.h>
16#include <linux/errno.h>
17#include <linux/export.h>
18#include <linux/jiffies.h>
19
20#include "sxgbe_mtl.h"
21#include "sxgbe_reg.h"
22
/* Program the MTL operation-mode register with the requested ETS
 * (transmit scheduling) and RAA (receive arbitration) algorithms.
 */
static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
			   unsigned int raa)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
	reg_val &= ETS_RST;

	/* ETS Algorithm
	 * NOTE(review): the switch operand is masked with
	 * SXGBE_MTL_OPMODE_ESTMASK (0x3), yet the labels ETS_WRR
	 * (0xFFFFFF9F), ETS_WFQ (0x20) and ETS_DWRR (0x40) all exceed
	 * 0x3, so no case can ever match — verify the mask and the
	 * constants against the hardware spec.
	 */
	switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
	case ETS_WRR:
		reg_val &= ETS_WRR;
		break;
	case ETS_WFQ:
		reg_val |= ETS_WFQ;
		break;
	case ETS_DWRR:
		reg_val |= ETS_DWRR;
		break;
	}
	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);

	/* Receive arbitration algorithm
	 * NOTE(review): same concern — RAA_SP (0xFFFFFFFB) and RAA_WSP
	 * (0x4) cannot match a value masked with SXGBE_MTL_OPMODE_RAAMASK
	 * (0x1); confirm against the spec.
	 */
	switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
	case RAA_SP:
		reg_val &= RAA_SP;
		break;
	case RAA_WSP:
		reg_val |= RAA_WSP;
		break;
	}
	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
}
55
/* For Dynamic DMA channel mapping for Rx queue */
static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
{
	/* program all three RXQ->DMA map registers for dynamic mapping */
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
}
63
64static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
65 int queue_fifo)
66{
67 u32 fifo_bits, reg_val;
68
69 /* 0 means 256 bytes */
70 fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
71 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
72 reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
73 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
74}
75
76static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
77 int queue_fifo)
78{
79 u32 fifo_bits, reg_val;
80
81 /* 0 means 256 bytes */
82 fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV)-1;
83 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
84 reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
85 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
86}
87
88static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
89{
90 u32 reg_val;
91
92 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
93 reg_val |= SXGBE_MTL_ENABLE_QUEUE;
94 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
95}
96
97static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
98{
99 u32 reg_val;
100
101 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
102 reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
103 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
104}
105
106static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
107 int threshold)
108{
109 u32 reg_val;
110
111 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
112 reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
113 reg_val |= (threshold << RX_FC_ACTIVE);
114
115 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
116}
117
118static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
119{
120 u32 reg_val;
121
122 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
123 reg_val |= SXGBE_MTL_ENABLE_FC;
124 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
125}
126
127static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
128 int threshold)
129{
130 u32 reg_val;
131
132 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
133 reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
134 reg_val |= (threshold << RX_FC_DEACTIVE);
135
136 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
137}
138
139static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
140{
141 u32 reg_val;
142
143 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
144 reg_val |= SXGBE_MTL_RXQ_OP_FEP;
145
146 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
147}
148
149static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
150{
151 u32 reg_val;
152
153 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
154 reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);
155
156 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
157}
158
159static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
160{
161 u32 reg_val;
162
163 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
164 reg_val |= SXGBE_MTL_RXQ_OP_FUP;
165
166 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
167}
168
169static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
170{
171 u32 reg_val;
172
173 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
174 reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);
175
176 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
177}
178
179
180static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
181 int tx_mode)
182{
183 u32 reg_val;
184
185 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
186 /* TX specific MTL mode settings */
187 if (tx_mode == SXGBE_MTL_SFMODE) {
188 reg_val |= SXGBE_MTL_SFMODE;
189 } else {
190 /* set the TTC values */
191 if (tx_mode <= 64)
192 reg_val |= MTL_CONTROL_TTC_64;
193 else if (tx_mode <= 96)
194 reg_val |= MTL_CONTROL_TTC_96;
195 else if (tx_mode <= 128)
196 reg_val |= MTL_CONTROL_TTC_128;
197 else if (tx_mode <= 192)
198 reg_val |= MTL_CONTROL_TTC_192;
199 else if (tx_mode <= 256)
200 reg_val |= MTL_CONTROL_TTC_256;
201 else if (tx_mode <= 384)
202 reg_val |= MTL_CONTROL_TTC_384;
203 else
204 reg_val |= MTL_CONTROL_TTC_512;
205 }
206
207 /* write into TXQ operation register */
208 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
209}
210
211static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
212 int rx_mode)
213{
214 u32 reg_val;
215
216 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
217 /* RX specific MTL mode settings */
218 if (rx_mode == SXGBE_RX_MTL_SFMODE) {
219 reg_val |= SXGBE_RX_MTL_SFMODE;
220 } else {
221 if (rx_mode <= 64)
222 reg_val |= MTL_CONTROL_RTC_64;
223 else if (rx_mode <= 96)
224 reg_val |= MTL_CONTROL_RTC_96;
225 else if (rx_mode <= 128)
226 reg_val |= MTL_CONTROL_RTC_128;
227 }
228
229 /* write into RXQ operation register */
230 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
231}
232
/* MTL operations table exported to the core driver via sxgbe_get_mtl_ops() */
static const struct sxgbe_mtl_ops mtl_ops = {
	.mtl_set_txfifosize = sxgbe_mtl_set_txfifosize,
	.mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize,
	.mtl_enable_txqueue = sxgbe_mtl_enable_txqueue,
	.mtl_disable_txqueue = sxgbe_mtl_disable_txqueue,
	.mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue,
	.set_tx_mtl_mode = sxgbe_set_tx_mtl_mode,
	.set_rx_mtl_mode = sxgbe_set_rx_mtl_mode,
	.mtl_init = sxgbe_mtl_init,
	.mtl_fc_active = sxgbe_mtl_fc_active,
	.mtl_fc_deactive = sxgbe_mtl_fc_deactive,
	.mtl_fc_enable = sxgbe_mtl_fc_enable,
	.mtl_fep_enable = sxgbe_mtl_fep_enable,
	.mtl_fep_disable = sxgbe_mtl_fep_disable,
	.mtl_fup_enable = sxgbe_mtl_fup_enable,
	.mtl_fup_disable = sxgbe_mtl_fup_disable
};
250
/* Return the static, read-only MTL operations table. */
const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
{
	return &mtl_ops;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
new file mode 100644
index 000000000000..7e4810c4137e
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
@@ -0,0 +1,104 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_MTL_H__
13#define __SXGBE_MTL_H__
14
15#define SXGBE_MTL_OPMODE_ESTMASK 0x3
16#define SXGBE_MTL_OPMODE_RAAMASK 0x1
17#define SXGBE_MTL_FCMASK 0x7
18#define SXGBE_MTL_TX_FIFO_DIV 256
19#define SXGBE_MTL_RX_FIFO_DIV 256
20
21#define SXGBE_MTL_RXQ_OP_FEP BIT(4)
22#define SXGBE_MTL_RXQ_OP_FUP BIT(3)
23#define SXGBE_MTL_ENABLE_FC 0x80
24
25#define ETS_WRR 0xFFFFFF9F
26#define ETS_RST 0xFFFFFF9F
27#define ETS_WFQ 0x00000020
28#define ETS_DWRR 0x00000040
29#define RAA_SP 0xFFFFFFFB
30#define RAA_WSP 0x00000004
31
32#define RX_QUEUE_DYNAMIC 0x80808080
33#define RX_FC_ACTIVE 8
34#define RX_FC_DEACTIVE 13
35
/* Transmit Threshold Control field values (TXQ operation-mode register) */
enum ttc_control {
	MTL_CONTROL_TTC_64 = 0x00000000,
	MTL_CONTROL_TTC_96 = 0x00000020,
	MTL_CONTROL_TTC_128 = 0x00000030,
	MTL_CONTROL_TTC_192 = 0x00000040,
	MTL_CONTROL_TTC_256 = 0x00000050,
	MTL_CONTROL_TTC_384 = 0x00000060,
	MTL_CONTROL_TTC_512 = 0x00000070,
};

/* Receive Threshold Control field values (RXQ operation-mode register) */
enum rtc_control {
	MTL_CONTROL_RTC_64 = 0x00000000,
	MTL_CONTROL_RTC_96 = 0x00000002,
	MTL_CONTROL_RTC_128 = 0x00000003,
};

/* Flow-control threshold encodings (FIFO fullness levels) */
enum flow_control_th {
	MTL_FC_FULL_1K = 0x00000000,
	MTL_FC_FULL_2K = 0x00000001,
	MTL_FC_FULL_4K = 0x00000002,
	MTL_FC_FULL_5K = 0x00000003,
	MTL_FC_FULL_6K = 0x00000004,
	MTL_FC_FULL_8K = 0x00000005,
	MTL_FC_FULL_16K = 0x00000006,
	MTL_FC_FULL_24K = 0x00000007,
};

/* Operations implemented by the MTL (MAC Transaction Layer) backend;
 * obtained through sxgbe_get_mtl_ops().
 */
struct sxgbe_mtl_ops {
	/* program the ETS and RAA scheduling/arbitration algorithms */
	void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg,
			 unsigned int raa);

	/* set per-queue TX FIFO size (bytes, 256-byte granularity) */
	void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
				   int mtl_fifo);

	/* set per-queue RX FIFO size (bytes, 256-byte granularity) */
	void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
				   int queue_fifo);

	void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);

	void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);

	/* select store-and-forward or threshold mode for a TX queue */
	void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
				int tx_mode);

	/* select store-and-forward or threshold mode for an RX queue */
	void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
				int rx_mode);

	/* map all RX queues dynamically onto the DMA channels */
	void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr);

	/* flow-control activation threshold (enum flow_control_th) */
	void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
			      int threshold);

	/* flow-control deactivation threshold (enum flow_control_th) */
	void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
				int threshold);

	void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);

	/* forward error packets (FEP) control */
	void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);

	void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num);

	/* forward undersized packets (FUP) control */
	void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num);

	void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num);
};

const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
103
104#endif /* __SXGBE_MTL_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
new file mode 100644
index 000000000000..b147d469a799
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -0,0 +1,259 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/etherdevice.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/of.h>
20#include <linux/of_irq.h>
21#include <linux/of_net.h>
22#include <linux/phy.h>
23#include <linux/platform_device.h>
24#include <linux/sxgbe_platform.h>
25
26#include "sxgbe_common.h"
27#include "sxgbe_reg.h"
28
29#ifdef CONFIG_OF
30static int sxgbe_probe_config_dt(struct platform_device *pdev,
31 struct sxgbe_plat_data *plat,
32 const char **mac)
33{
34 struct device_node *np = pdev->dev.of_node;
35 struct sxgbe_dma_cfg *dma_cfg;
36
37 if (!np)
38 return -ENODEV;
39
40 *mac = of_get_mac_address(np);
41 plat->interface = of_get_phy_mode(np);
42
43 plat->bus_id = of_alias_get_id(np, "ethernet");
44 if (plat->bus_id < 0)
45 plat->bus_id = 0;
46
47 plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
48 sizeof(*plat->mdio_bus_data),
49 GFP_KERNEL);
50
51 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
52 if (!dma_cfg)
53 return -ENOMEM;
54
55 plat->dma_cfg = dma_cfg;
56 of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl);
57 if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0)
58 dma_cfg->fixed_burst = true;
59
60 return 0;
61}
62#else
/* Stub used when the kernel is built without device-tree support:
 * DT-based probing is simply not implemented in that configuration. */
static int sxgbe_probe_config_dt(struct platform_device *pdev,
				 struct sxgbe_plat_data *plat,
				 const char **mac)
{
	return -ENOSYS;
}
69#endif /* CONFIG_OF */
70
71/**
72 * sxgbe_platform_probe
73 * @pdev: platform device pointer
74 * Description: platform_device probe function. It allocates
75 * the necessary resources and invokes the main to init
76 * the net device, register the mdio bus etc.
77 */
78static int sxgbe_platform_probe(struct platform_device *pdev)
79{
80 int ret;
81 int i, chan;
82 struct resource *res;
83 struct device *dev = &pdev->dev;
84 void __iomem *addr;
85 struct sxgbe_priv_data *priv = NULL;
86 struct sxgbe_plat_data *plat_dat = NULL;
87 const char *mac = NULL;
88 struct net_device *ndev = platform_get_drvdata(pdev);
89 struct device_node *node = dev->of_node;
90
91 /* Get memory resource */
92 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
93 if (!res)
94 goto err_out;
95
96 addr = devm_ioremap_resource(dev, res);
97 if (IS_ERR(addr))
98 return PTR_ERR(addr);
99
100 if (pdev->dev.of_node) {
101 plat_dat = devm_kzalloc(&pdev->dev,
102 sizeof(struct sxgbe_plat_data),
103 GFP_KERNEL);
104 if (!plat_dat)
105 return -ENOMEM;
106
107 ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
108 if (ret) {
109 pr_err("%s: main dt probe failed\n", __func__);
110 return ret;
111 }
112 }
113
114 /* Get MAC address if available (DT) */
115 if (mac)
116 ether_addr_copy(priv->dev->dev_addr, mac);
117
118 priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
119 if (!priv) {
120 pr_err("%s: main driver probe failed\n", __func__);
121 goto err_out;
122 }
123
124 /* Get the SXGBE common INT information */
125 priv->irq = irq_of_parse_and_map(node, 0);
126 if (priv->irq <= 0) {
127 dev_err(dev, "sxgbe common irq parsing failed\n");
128 goto err_drv_remove;
129 }
130
131 /* Get the TX/RX IRQ numbers */
132 for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
133 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
134 if (priv->txq[i]->irq_no <= 0) {
135 dev_err(dev, "sxgbe tx irq parsing failed\n");
136 goto err_tx_irq_unmap;
137 }
138 }
139
140 for (i = 0; i < SXGBE_RX_QUEUES; i++) {
141 priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
142 if (priv->rxq[i]->irq_no <= 0) {
143 dev_err(dev, "sxgbe rx irq parsing failed\n");
144 goto err_rx_irq_unmap;
145 }
146 }
147
148 priv->lpi_irq = irq_of_parse_and_map(node, chan);
149 if (priv->lpi_irq <= 0) {
150 dev_err(dev, "sxgbe lpi irq parsing failed\n");
151 goto err_rx_irq_unmap;
152 }
153
154 platform_set_drvdata(pdev, priv->dev);
155
156 pr_debug("platform driver registration completed\n");
157
158 return 0;
159
160err_rx_irq_unmap:
161 while (--i)
162 irq_dispose_mapping(priv->rxq[i]->irq_no);
163 i = SXGBE_TX_QUEUES;
164err_tx_irq_unmap:
165 while (--i)
166 irq_dispose_mapping(priv->txq[i]->irq_no);
167 irq_dispose_mapping(priv->irq);
168err_drv_remove:
169 sxgbe_drv_remove(ndev);
170err_out:
171 return -ENODEV;
172}
173
174/**
175 * sxgbe_platform_remove
176 * @pdev: platform device pointer
177 * Description: this function calls the main to free the net resources
178 * and calls the platforms hook and release the resources (e.g. mem).
179 */
/**
 * sxgbe_platform_remove
 * @pdev: platform device pointer
 * Description: fetches the net_device stored as driver data and hands
 * it to the core driver's remove routine, which frees the net
 * resources; its status is propagated to the platform core.
 */
static int sxgbe_platform_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	return sxgbe_drv_remove(ndev);
}
187
#ifdef CONFIG_PM
/* System-sleep hooks: each one recovers the net_device from the
 * device's driver data and delegates to the matching core-driver
 * routine (sxgbe_suspend/resume/freeze/restore). */
static int sxgbe_platform_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return sxgbe_suspend(ndev);
}

static int sxgbe_platform_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return sxgbe_resume(ndev);
}

static int sxgbe_platform_freeze(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return sxgbe_freeze(ndev);
}

static int sxgbe_platform_restore(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return sxgbe_restore(ndev);
}

static const struct dev_pm_ops sxgbe_platform_pm_ops = {
	.suspend = sxgbe_platform_suspend,
	.resume = sxgbe_platform_resume,
	.freeze = sxgbe_platform_freeze,
	/* NOTE(review): .thaw is wired to the restore handler even though
	 * a dedicated freeze handler exists — confirm this asymmetry is
	 * intentional. */
	.thaw = sxgbe_platform_restore,
	.restore = sxgbe_platform_restore,
};
#else
/* Empty (all-NULL) ops when power management is disabled, so the
 * driver's .pm assignment stays valid in both configurations. */
static const struct dev_pm_ops sxgbe_platform_pm_ops;
#endif /* CONFIG_PM */
227
/* Device-tree match table; the table is exported for module autoload. */
static const struct of_device_id sxgbe_dt_ids[] = {
	{ .compatible = "samsung,sxgbe-v2.0a"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);

/* Platform-bus glue: binds the probe/remove and PM callbacks above.
 * of_match_ptr() compiles the match table out when CONFIG_OF is off. */
static struct platform_driver sxgbe_platform_driver = {
	.probe = sxgbe_platform_probe,
	.remove = sxgbe_platform_remove,
	.driver = {
		.name = SXGBE_RESOURCE_NAME,
		.owner = THIS_MODULE,
		.pm = &sxgbe_platform_pm_ops,
		.of_match_table = of_match_ptr(sxgbe_dt_ids),
	},
};
244
245int sxgbe_register_platform(void)
246{
247 int err;
248
249 err = platform_driver_register(&sxgbe_platform_driver);
250 if (err)
251 pr_err("failed to register the platform driver\n");
252
253 return err;
254}
255
/* Unregister the sxgbe platform driver; counterpart of
 * sxgbe_register_platform(). */
void sxgbe_unregister_platform(void)
{
	platform_driver_unregister(&sxgbe_platform_driver);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
new file mode 100644
index 000000000000..5a89acb4c505
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -0,0 +1,488 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_REGMAP_H__
13#define __SXGBE_REGMAP_H__
14
15/* SXGBE MAC Registers */
16#define SXGBE_CORE_TX_CONFIG_REG 0x0000
17#define SXGBE_CORE_RX_CONFIG_REG 0x0004
18#define SXGBE_CORE_PKT_FILTER_REG 0x0008
19#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C
20#define SXGBE_CORE_HASH_TABLE_REG0 0x0010
21#define SXGBE_CORE_HASH_TABLE_REG1 0x0014
22#define SXGBE_CORE_HASH_TABLE_REG2 0x0018
23#define SXGBE_CORE_HASH_TABLE_REG3 0x001C
24#define SXGBE_CORE_HASH_TABLE_REG4 0x0020
25#define SXGBE_CORE_HASH_TABLE_REG5 0x0024
26#define SXGBE_CORE_HASH_TABLE_REG6 0x0028
27#define SXGBE_CORE_HASH_TABLE_REG7 0x002C
28
29/* EEE-LPI Registers */
30#define SXGBE_CORE_LPI_CTRL_STATUS 0x00D0
31#define SXGBE_CORE_LPI_TIMER_CTRL 0x00D4
32
33/* VLAN Specific Registers */
34#define SXGBE_CORE_VLAN_TAG_REG 0x0050
35#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058
36#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060
37#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064
38#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C
39
40/* Flow Contol Registers */
41#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070
42#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074
43#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078
44#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C
45#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080
46#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084
47#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088
48#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C
49#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090
50#define SXGBE_CORE_RX_CTL0_REG 0x00A0
51#define SXGBE_CORE_RX_CTL1_REG 0x00A4
52#define SXGBE_CORE_RX_CTL2_REG 0x00A8
53#define SXGBE_CORE_RX_CTL3_REG 0x00AC
54
55/* Interrupt Registers */
56#define SXGBE_CORE_INT_STATUS_REG 0x00B0
57#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
58#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8
59#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0
60#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4
61#define SXGBE_CORE_VERSION_REG 0x0110
62#define SXGBE_CORE_DEBUG_REG 0x0114
63#define SXGBE_CORE_HW_FEA_REG(index) (0x011C + index * 4)
64
65/* SMA(MDIO) module registers */
66#define SXGBE_MDIO_SCMD_ADD_REG 0x0200
67#define SXGBE_MDIO_SCMD_DATA_REG 0x0204
68#define SXGBE_MDIO_CCMD_WADD_REG 0x0208
69#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C
70#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210
71#define SXGBE_MDIO_INT_STATUS_REG 0x0214
72#define SXGBE_MDIO_INT_ENABLE_REG 0x0218
73#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C
74#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220
75
76/* port specific, addr = 0-3 */
77#define SXGBE_MDIO_DEV_BASE_REG 0x0230
78#define SXGBE_MDIO_PORT_DEV_REG(addr) \
79 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0)
80#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \
81 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4)
82#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \
83 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8)
84
85#define SXGBE_CORE_GPIO_CTL_REG 0x0278
86#define SXGBE_CORE_GPIO_STATUS_REG 0x027C
87
88/* Address registers for filtering */
89#define SXGBE_CORE_ADD_BASE_REG 0x0300
90
91/* addr = 0-31 */
92#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \
93 (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0)
94#define SXGBE_CORE_ADD_LOWOFFSET(addr) \
95 (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4)
96
97/* SXGBE MMC registers */
98#define SXGBE_MMC_CTL_REG 0x0800
99#define SXGBE_MMC_RXINT_STATUS_REG 0x0804
100#define SXGBE_MMC_TXINT_STATUS_REG 0x0808
101#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C
102#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810
103
104/* TX specific counters */
105#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814
106#define SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818
107#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C
108#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820
109#define SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824
110#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828
111#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C
112#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830
113#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834
114#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838
115#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C
116#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840
117#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844
118#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848
119#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C
120#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850
121#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854
122#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858
123#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C
124#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860
125#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864
126#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868
127#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C
128#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870
129#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874
130#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878
131#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C
132#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880
133#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884
134#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888
135#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C
136#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890
137#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894
138#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898
139#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C
140#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0
141
142/* RX specific counters */
143#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900
144#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904
145#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908
146#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C
147#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910
148#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914
149#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918
150#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C
151#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920
152#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924
153#define SXGBE_MMC_RXCRCERRLO_REG 0x0928
154#define SXGBE_MMC_RXCRCERRHI_REG 0x092C
155#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930
156#define SXGBE_MMC_RXJABBERERR_REG 0x0934
157#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938
158#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C
159#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940
160#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944
161#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948
162#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C
163#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950
164#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954
165#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958
166#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C
167#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960
168#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964
169#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968
170#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C
171#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970
172#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974
173#define SXGBE_MMC_RXLENERRLO_REG 0x0978
174#define SXGBE_MMC_RXLENERRHI_REG 0x097C
175#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980
176#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984
177#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988
178#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C
179#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990
180#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994
181#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998
182#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C
183#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0
184
/* L3/L4 function registers.
 * Fix: SXGBE_CORE_L34_ADDCTL_REG was defined twice (identically);
 * the duplicate line is removed. */
#define SXGBE_CORE_L34_ADDCTL_REG	0x0C00
#define SXGBE_CORE_L34_DATA_REG		0x0C04
189
190/* ARP registers */
191#define SXGBE_CORE_ARP_ADD_REG 0x0C10
192
193/* RSS registers */
194#define SXGBE_CORE_RSS_CTL_REG 0x0C80
195#define SXGBE_CORE_RSS_ADD_REG 0x0C88
196#define SXGBE_CORE_RSS_DATA_REG 0x0C8C
197
198/* RSS control register bits */
199#define SXGBE_CORE_RSS_CTL_UDP4TE BIT(3)
200#define SXGBE_CORE_RSS_CTL_TCP4TE BIT(2)
201#define SXGBE_CORE_RSS_CTL_IP2TE BIT(1)
202#define SXGBE_CORE_RSS_CTL_RSSE BIT(0)
203
204/* IEEE 1588 registers */
205#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00
206#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04
207#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C
208#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10
209#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14
210#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18
211#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C
212#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20
213#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30
214#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34
215
216/* Auxiliary registers */
217#define SXGBE_CORE_AUX_CTL_REG 0x0D40
218#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48
219#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C
220#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50
221#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54
222#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58
223#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C
224#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60
225#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64
226
227/* PPS registers */
228#define SXGBE_CORE_PPS_CTL_REG 0x0D70
229#define SXGBE_CORE_PPS_BASE 0x0D80
230
231/* addr = 0 - 3 */
232#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \
233 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0)
234#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \
235 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4)
236#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \
237 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8)
238#define SXGBE_CORE_PPS_WIDTH_REG(addr) \
239 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC)
240#define SXGBE_CORE_PTO_CTL_REG 0x0DC0
241#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4
242#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8
243#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC
244#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0
245
246/* SXGBE MTL Registers */
247#define SXGBE_MTL_BASE_REG 0x1000
248#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000)
249#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008)
250#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C)
251#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010)
252#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020)
253#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030)
254#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034)
255#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038)
256#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040)
257#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044)
258
259/* TC/Queue registers, qnum=0-15 */
260#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100)
261#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \
262 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00)
263#define SXGBE_MTL_SFMODE BIT(1)
264#define SXGBE_MTL_FIFO_LSHIFT 16
265#define SXGBE_MTL_ENABLE_QUEUE 0x00000008
266#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \
267 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04)
268#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \
269 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08)
270#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \
271 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10)
272#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \
273 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14)
274#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \
275 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18)
276
277#define SXGBE_MTL_TC_RXBASE_REG 0x1140
278#define SXGBE_RX_MTL_SFMODE BIT(5)
279#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \
280 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00)
281#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \
282 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04)
283#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \
284 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08)
285#define SXGBE_MTL_RXQ_CTL_REG(qnum) \
286 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C)
287#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \
288 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30)
289#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \
290 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34)
291
292/* SXGBE DMA Registers */
293#define SXGBE_DMA_BASE_REG 0x3000
294#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000)
295#define SXGBE_DMA_SOFT_RESET BIT(0)
296#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004)
297#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0)
298#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11)
299#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008)
300#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010)
301#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018)
302#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020)
303#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024)
304#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028)
305#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C)
306#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030)
307#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034)
308
309/* Channel Registers, cha_num = 0-15 */
310#define SXGBE_DMA_CHA_BASE_REG \
311 (SXGBE_DMA_BASE_REG + 0x0100)
312#define SXGBE_DMA_CHA_CTL_REG(cha_num) \
313 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00)
314#define SXGBE_DMA_PBL_X8MODE BIT(16)
315#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12)
316#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \
317 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04)
318#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \
319 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08)
320#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \
321 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10)
322#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \
323 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14)
324#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \
325 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18)
326#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \
327 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C)
328#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \
329 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24)
330#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \
331 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C)
332#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \
333 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30)
334#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \
335 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34)
336#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \
337 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38)
338#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \
339 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C)
340#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \
341 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44)
342#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \
343 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C)
344#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \
345 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50)
346#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \
347 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54)
348#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \
349 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58)
350#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \
351 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C)
352#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \
353 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60)
354
355/* TX DMA control register specific */
356#define SXGBE_TX_START_DMA BIT(0)
357
358/* sxgbe tx configuration register bitfields */
359#define SXGBE_SPEED_10G 0x0
360#define SXGBE_SPEED_2_5G 0x1
361#define SXGBE_SPEED_1G 0x2
362#define SXGBE_SPEED_LSHIFT 29
363
364#define SXGBE_TX_ENABLE BIT(0)
365#define SXGBE_TX_DISDIC_ALGO BIT(1)
366#define SXGBE_TX_JABBER_DISABLE BIT(16)
367
368/* sxgbe rx configuration register bitfields */
369#define SXGBE_RX_ENABLE BIT(0)
370#define SXGBE_RX_ACS_ENABLE BIT(1)
371#define SXGBE_RX_WATCHDOG_DISABLE BIT(7)
372#define SXGBE_RX_JUMBPKT_ENABLE BIT(8)
373#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9)
374#define SXGBE_RX_LOOPBACK_ENABLE BIT(10)
375#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31)
376
377/* sxgbe vlan Tag Register bitfields */
378#define SXGBE_VLAN_SVLAN_ENABLE BIT(18)
379#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26)
380#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27)
381
382/* XMAC VLAN Tag Inclusion Register(0x0060) bitfields
383 * Below fields same for Inner VLAN Tag Inclusion
384 * Register(0x0064) register
385 */
386enum vlan_tag_ctl_tx {
387 VLAN_TAG_TX_NOP,
388 VLAN_TAG_TX_DEL,
389 VLAN_TAG_TX_INSERT,
390 VLAN_TAG_TX_REPLACE
391};
392#define SXGBE_VLAN_PRTY_CTL BIT(18)
393#define SXGBE_VLAN_CSVL_CTL BIT(19)
394
395/* SXGBE TX Q Flow Control Register bitfields */
396#define SXGBE_TX_FLOW_CTL_FCB BIT(0)
397#define SXGBE_TX_FLOW_CTL_TFB BIT(1)
398
399/* SXGBE RX Q Flow Control Register bitfields */
400#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0)
401#define SXGBE_RX_UNICAST_DETECT BIT(1)
402#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8)
403
404/* sxgbe rx Q control0 register bitfields */
405#define SXGBE_RX_Q_ENABLE 0x2
406
407/* SXGBE hardware features bitfield specific */
408/* Capability Register 0 */
409#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1)
410#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4)
411#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5)
412#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6)
413#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7)
414#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8)
415#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9)
416#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12)
417#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13)
418#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14)
419#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16)
420#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18)
421#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25)
422#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27)
423
424/* Capability Register 1 */
425#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F))
426#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6)
427#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13)
428#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16)
429#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17)
430#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18)
431#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19)
432#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20)
433#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24)
434#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 0x78000000) >> 27)
435
436/* Capability Register 2 */
437#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap) ((cap & 0x0000000F))
438#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap) ((cap & 0x000003C0) >> 6)
439#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap) ((cap & 0x0000F000) >> 12)
440#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap) ((cap & 0x003C0000) >> 18)
441#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap) ((cap & 0x07000000) >> 24)
442#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap) ((cap & 0x70000000) >> 28)
443
444/* DMAchannel interrupt enable specific */
445/* DMA Normal interrupt */
446#define SXGBE_DMA_INT_ENA_NIE BIT(16) /* Normal Summary */
447#define SXGBE_DMA_INT_ENA_TIE BIT(0) /* Transmit Interrupt */
448#define SXGBE_DMA_INT_ENA_TUE BIT(2) /* Transmit Buffer Unavailable */
449#define SXGBE_DMA_INT_ENA_RIE BIT(6) /* Receive Interrupt */
450
451#define SXGBE_DMA_INT_NORMAL \
452 (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE | \
453 SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
454
455/* DMA Abnormal interrupt */
456#define SXGBE_DMA_INT_ENA_AIE BIT(15) /* Abnormal Summary */
457#define SXGBE_DMA_INT_ENA_TSE BIT(1) /* Transmit Stopped */
458#define SXGBE_DMA_INT_ENA_RUE BIT(7) /* Receive Buffer Unavailable */
459#define SXGBE_DMA_INT_ENA_RSE BIT(8) /* Receive Stopped */
460#define SXGBE_DMA_INT_ENA_FBE BIT(12) /* Fatal Bus Error */
461#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */
462
463#define SXGBE_DMA_INT_ABNORMAL \
464 (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE | \
465 SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE | \
466 SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
467
468#define SXGBE_DMA_ENA_INT (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
469
470/* DMA channel interrupt status specific */
471#define SXGBE_DMA_INT_STATUS_REB2 BIT(21)
472#define SXGBE_DMA_INT_STATUS_REB1 BIT(20)
473#define SXGBE_DMA_INT_STATUS_REB0 BIT(19)
474#define SXGBE_DMA_INT_STATUS_TEB2 BIT(18)
475#define SXGBE_DMA_INT_STATUS_TEB1 BIT(17)
476#define SXGBE_DMA_INT_STATUS_TEB0 BIT(16)
477#define SXGBE_DMA_INT_STATUS_NIS BIT(15)
478#define SXGBE_DMA_INT_STATUS_AIS BIT(14)
479#define SXGBE_DMA_INT_STATUS_CTXTERR BIT(13)
480#define SXGBE_DMA_INT_STATUS_FBE BIT(12)
481#define SXGBE_DMA_INT_STATUS_RPS BIT(8)
482#define SXGBE_DMA_INT_STATUS_RBU BIT(7)
483#define SXGBE_DMA_INT_STATUS_RI BIT(6)
484#define SXGBE_DMA_INT_STATUS_TBU BIT(2)
485#define SXGBE_DMA_INT_STATUS_TPS BIT(1)
486#define SXGBE_DMA_INT_STATUS_TI BIT(0)
487
488#endif /* __SXGBE_REGMAP_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644
index 000000000000..51c32194ba88
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
@@ -0,0 +1,91 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/bitops.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/phy.h>
16#include "sxgbe_common.h"
17#include "sxgbe_xpcs.h"
18
19static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
20{
21 u32 value;
22 struct sxgbe_priv_data *priv = netdev_priv(ndev);
23
24 value = readl(priv->ioaddr + XPCS_OFFSET + reg);
25
26 return value;
27}
28
29static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
30{
31 struct sxgbe_priv_data *priv = netdev_priv(ndev);
32
33 writel(data, priv->ioaddr + XPCS_OFFSET + reg);
34
35 return 0;
36}
37
/* Bring the XPCS up in 10G XAUI mode: select the XAUI PCS type, then
 * pulse BIT(11) in SR_PCS_MMD_CONTROL1 while polling the vendor
 * digital-status register.  Always returns 0.
 *
 * NOTE(review): both polling loops are unbounded; a hardware fault
 * would hang here forever — consider a timeout.
 */
int sxgbe_xpcs_init(struct net_device *ndev)
{
	u32 value;

	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
	/* 10G XAUI mode */
	sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
	sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
	/* NOTE(review): this writes the value read from SR_PCS_MMD_CONTROL1
	 * (with BIT(13) set) into the XAUI mode-control register — right
	 * after that register was set to XPCS_XAUI_MODE.  The intended
	 * target was presumably SR_PCS_MMD_CONTROL1 (cf. the 1G init,
	 * which manipulates BIT(13) of CONTROL1) — confirm against the
	 * XPCS databook. */
	sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));

	/* NOTE(review): this first loop waits while the masked state
	 * EQUALS XPCS_QSEQ_STATE_STABLE, whereas the second waits while
	 * it does NOT — verify the asymmetry is intentional. */
	do {
		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
	} while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);

	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));

	do {
		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
	} while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);

	return 0;
}
62
/* Bring the XPCS up in 1G (10GBASE-X PCS) mode and enable clause-37
 * auto-negotiation.  Always returns 0.
 *
 * NOTE(review): the polling loop is unbounded; a hardware fault would
 * hang here forever — consider a timeout.
 */
int sxgbe_xpcs_init_1G(struct net_device *ndev)
{
	int value;

	/* 10GBASE-X PCS (1G) mode */
	sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
	sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));

	value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
	sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
	/* NOTE(review): "value" here is the stale pre-BIT(6) read, so this
	 * write clears BIT(13) but also drops the BIT(6) just set above
	 * (unless the register already had it) — confirm the intended
	 * final register contents against the XPCS databook. */
	sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));

	do {
		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
	} while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);

	value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
	sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));

	/* Auto Negotiation clause 37 enable */
	value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
	sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));

	return 0;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644
index 000000000000..6b26a50724d3
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
@@ -0,0 +1,38 @@
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Byungho An <bh74.an@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __SXGBE_XPCS_H__
#define __SXGBE_XPCS_H__

/* XPCS Registers
 *
 * NOTE(review): the register constants appear to encode an MDIO MMD
 * selector in the upper byte (0x03xxxx = PCS MMD, 0x1Fxxxx = MII/vendor
 * MMD) with the register address in the lower bits — confirm against the
 * XPCS access helpers in sxgbe_xpcs.c.
 */
#define XPCS_OFFSET			0x1A060000
#define SR_PCS_MMD_CONTROL1		0x030000
#define SR_PCS_CONTROL2			0x030007
#define VR_PCS_MMD_XAUI_MODE_CONTROL	0x038004
#define VR_PCS_MMD_DIGITAL_STATUS	0x038010
#define SR_MII_MMD_CONTROL		0x1F0000
#define SR_MII_MMD_AN_ADV		0x1F0004
#define SR_MII_MMD_AN_LINK_PARTNER_BA	0x1F0005
#define VR_MII_MMD_AN_CONTROL		0x1F8001
#define VR_MII_MMD_AN_INT_STATUS	0x1F8002

/* Quad-sequencer states read from VR_PCS_MMD_DIGITAL_STATUS:
 * the init routines mask with XPCS_QSEQ_STATE_MPLLOFF and poll until the
 * result equals XPCS_QSEQ_STATE_STABLE.
 */
#define XPCS_QSEQ_STATE_STABLE		0x10
#define XPCS_QSEQ_STATE_MPLLOFF		0x1c
/* PCS type selection values written to SR_PCS_CONTROL2 */
#define XPCS_TYPE_SEL_R			0x00
#define XPCS_TYPE_SEL_X			0x01
#define XPCS_TYPE_SEL_W			0x02
/* Lane-mode values written to VR_PCS_MMD_XAUI_MODE_CONTROL */
#define XPCS_XAUI_MODE			0x00
#define XPCS_RXAUI_MODE			0x01

/* XPCS mode initialisation entry points (defined in sxgbe_xpcs.c) */
int sxgbe_xpcs_init(struct net_device *ndev);
int sxgbe_xpcs_init_1G(struct net_device *ndev);

#endif /* __SXGBE_XPCS_H__ */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f5fe51..21c20ea0dad0 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -162,8 +162,8 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
162 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) 162 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
163 return -EIO; 163 return -EIO;
164 164
165 memcpy(mac_address, 165 ether_addr_copy(mac_address,
166 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN); 166 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
167 return 0; 167 return 0;
168} 168}
169 169
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
172 struct efx_ef10_nic_data *nic_data; 172 struct efx_ef10_nic_data *nic_data;
173 int i, rc; 173 int i, rc;
174 174
175 /* We can have one VI for each 8K region. However we need 175 /* We can have one VI for each 8K region. However, until we
176 * multiple TX queues per channel. 176 * use TX option descriptors we need two TX queues per channel.
177 */ 177 */
178 efx->max_channels = 178 efx->max_channels =
179 min_t(unsigned int, 179 min_t(unsigned int,
@@ -565,10 +565,17 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
565 * several of each (in fact that's the only option if host 565 * several of each (in fact that's the only option if host
566 * page size is >4K). So we may allocate some extra VIs just 566 * page size is >4K). So we may allocate some extra VIs just
567 * for writing PIO buffers through. 567 * for writing PIO buffers through.
568 *
569 * The UC mapping contains (min_vis - 1) complete VIs and the
570 * first half of the next VI. Then the WC mapping begins with
571 * the second half of this last VI.
568 */ 572 */
569 uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + 573 uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
570 ER_DZ_TX_PIOBUF); 574 ER_DZ_TX_PIOBUF);
571 if (nic_data->n_piobufs) { 575 if (nic_data->n_piobufs) {
576 /* pio_write_vi_base rounds down to give the number of complete
577 * VIs inside the UC mapping.
578 */
572 pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; 579 pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
573 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + 580 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
574 nic_data->n_piobufs) * 581 nic_data->n_piobufs) *
@@ -1955,6 +1962,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1955 int tx_descs = 0; 1962 int tx_descs = 0;
1956 int spent = 0; 1963 int spent = 0;
1957 1964
1965 if (quota <= 0)
1966 return spent;
1967
1958 read_ptr = channel->eventq_read_ptr; 1968 read_ptr = channel->eventq_read_ptr;
1959 1969
1960 for (;;) { 1970 for (;;) {
@@ -3145,12 +3155,10 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3145 table->dev_uc_count = -1; 3155 table->dev_uc_count = -1;
3146 } else { 3156 } else {
3147 table->dev_uc_count = 1 + netdev_uc_count(net_dev); 3157 table->dev_uc_count = 1 + netdev_uc_count(net_dev);
3148 memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr, 3158 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
3149 ETH_ALEN);
3150 i = 1; 3159 i = 1;
3151 netdev_for_each_uc_addr(uc, net_dev) { 3160 netdev_for_each_uc_addr(uc, net_dev) {
3152 memcpy(table->dev_uc_list[i].addr, 3161 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
3153 uc->addr, ETH_ALEN);
3154 i++; 3162 i++;
3155 } 3163 }
3156 } 3164 }
@@ -3162,8 +3170,7 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3162 eth_broadcast_addr(table->dev_mc_list[0].addr); 3170 eth_broadcast_addr(table->dev_mc_list[0].addr);
3163 i = 1; 3171 i = 1;
3164 netdev_for_each_mc_addr(mc, net_dev) { 3172 netdev_for_each_mc_addr(mc, net_dev) {
3165 memcpy(table->dev_mc_list[i].addr, 3173 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
3166 mc->addr, ETH_ALEN);
3167 i++; 3174 i++;
3168 } 3175 }
3169 } 3176 }
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 207ac9a1e3de..62a55dde61d5 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -227,36 +227,6 @@
227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
229 229
230/* RX_USER_DESC */
231#define ESF_DZ_RX_USR_RESERVED_LBN 62
232#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
233#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
234#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
235#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
236#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
237#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
238#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
239#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
240#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
241#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
242#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
243#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
244#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
245#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
246#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
247#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
248#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
249#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
250#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
251#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
252#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
253#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
254#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
255#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
256#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
257#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
258#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
259
260/* TX_CSUM_TSTAMP_DESC */ 230/* TX_CSUM_TSTAMP_DESC */
261#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 231#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
262#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 232#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
338#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 308#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
339#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 309#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
340 310
341/* TX_USER_DESC */
342#define ESF_DZ_TX_USR_TYPE_LBN 63
343#define ESF_DZ_TX_USR_TYPE_WIDTH 1
344#define ESF_DZ_TX_USR_CONT_LBN 62
345#define ESF_DZ_TX_USR_CONT_WIDTH 1
346#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
347#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
348#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
349#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
350#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
351#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
352#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
353#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
354#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
355#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
356#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
357#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
358#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
359#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
360#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
361#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
362#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
363#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
364#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
365#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
366#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
367#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
368#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
369#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
370#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
371#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
372/*************************************************************************/ 311/*************************************************************************/
373 312
374/* TX_DESC_UPD_REG: Transmit descriptor update register. 313/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 83d464347021..57b971e5e6b2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
503 goto fail; 503 goto fail;
504 } 504 }
505 505
506 channel->n_rx_frm_trunc = 0;
507
508 return 0; 506 return 0;
509 507
510fail: 508fail:
@@ -1014,7 +1012,7 @@ static int efx_probe_port(struct efx_nic *efx)
1014 return rc; 1012 return rc;
1015 1013
1016 /* Initialise MAC address to permanent address */ 1014 /* Initialise MAC address to permanent address */
1017 memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN); 1015 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
1018 1016
1019 return 0; 1017 return 0;
1020} 1018}
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1346 1344
1347 for (i = 0; i < n_channels; i++) 1345 for (i = 0; i < n_channels; i++)
1348 xentries[i].entry = i; 1346 xentries[i].entry = i;
1349 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); 1347 rc = pci_enable_msix_range(efx->pci_dev,
1350 if (rc > 0) { 1348 xentries, 1, n_channels);
1349 if (rc < 0) {
1350 /* Fall back to single channel MSI */
1351 efx->interrupt_mode = EFX_INT_MODE_MSI;
1352 netif_err(efx, drv, efx->net_dev,
1353 "could not enable MSI-X\n");
1354 } else if (rc < n_channels) {
1351 netif_err(efx, drv, efx->net_dev, 1355 netif_err(efx, drv, efx->net_dev,
1352 "WARNING: Insufficient MSI-X vectors" 1356 "WARNING: Insufficient MSI-X vectors"
1353 " available (%d < %u).\n", rc, n_channels); 1357 " available (%d < %u).\n", rc, n_channels);
1354 netif_err(efx, drv, efx->net_dev, 1358 netif_err(efx, drv, efx->net_dev,
1355 "WARNING: Performance may be reduced.\n"); 1359 "WARNING: Performance may be reduced.\n");
1356 EFX_BUG_ON_PARANOID(rc >= n_channels);
1357 n_channels = rc; 1360 n_channels = rc;
1358 rc = pci_enable_msix(efx->pci_dev, xentries,
1359 n_channels);
1360 } 1361 }
1361 1362
1362 if (rc == 0) { 1363 if (rc > 0) {
1363 efx->n_channels = n_channels; 1364 efx->n_channels = n_channels;
1364 if (n_channels > extra_channels) 1365 if (n_channels > extra_channels)
1365 n_channels -= extra_channels; 1366 n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1375 for (i = 0; i < efx->n_channels; i++) 1376 for (i = 0; i < efx->n_channels; i++)
1376 efx_get_channel(efx, i)->irq = 1377 efx_get_channel(efx, i)->irq =
1377 xentries[i].vector; 1378 xentries[i].vector;
1378 } else {
1379 /* Fall back to single channel MSI */
1380 efx->interrupt_mode = EFX_INT_MODE_MSI;
1381 netif_err(efx, drv, efx->net_dev,
1382 "could not enable MSI-X\n");
1383 } 1379 }
1384 } 1380 }
1385 1381
@@ -1603,6 +1599,8 @@ static int efx_probe_nic(struct efx_nic *efx)
1603 if (rc) 1599 if (rc)
1604 goto fail1; 1600 goto fail1;
1605 1601
1602 efx_set_channels(efx);
1603
1606 rc = efx->type->dimension_resources(efx); 1604 rc = efx->type->dimension_resources(efx);
1607 if (rc) 1605 if (rc)
1608 goto fail2; 1606 goto fail2;
@@ -1613,7 +1611,6 @@ static int efx_probe_nic(struct efx_nic *efx)
1613 efx->rx_indir_table[i] = 1611 efx->rx_indir_table[i] =
1614 ethtool_rxfh_indir_default(i, efx->rss_spread); 1612 ethtool_rxfh_indir_default(i, efx->rss_spread);
1615 1613
1616 efx_set_channels(efx);
1617 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 1614 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1618 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); 1615 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1619 1616
@@ -2115,7 +2112,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
2115{ 2112{
2116 struct efx_nic *efx = netdev_priv(net_dev); 2113 struct efx_nic *efx = netdev_priv(net_dev);
2117 struct sockaddr *addr = data; 2114 struct sockaddr *addr = data;
2118 char *new_addr = addr->sa_data; 2115 u8 *new_addr = addr->sa_data;
2119 2116
2120 if (!is_valid_ether_addr(new_addr)) { 2117 if (!is_valid_ether_addr(new_addr)) {
2121 netif_err(efx, drv, efx->net_dev, 2118 netif_err(efx, drv, efx->net_dev,
@@ -2124,7 +2121,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
2124 return -EADDRNOTAVAIL; 2121 return -EADDRNOTAVAIL;
2125 } 2122 }
2126 2123
2127 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); 2124 ether_addr_copy(net_dev->dev_addr, new_addr);
2128 efx_sriov_mac_address_changed(efx); 2125 efx_sriov_mac_address_changed(efx);
2129 2126
2130 /* Reconfigure the MAC */ 2127 /* Reconfigure the MAC */
@@ -3273,6 +3270,6 @@ module_exit(efx_exit_module);
3273 3270
3274MODULE_AUTHOR("Solarflare Communications and " 3271MODULE_AUTHOR("Solarflare Communications and "
3275 "Michael Brown <mbrown@fensystems.co.uk>"); 3272 "Michael Brown <mbrown@fensystems.co.uk>");
3276MODULE_DESCRIPTION("Solarflare Communications network driver"); 3273MODULE_DESCRIPTION("Solarflare network driver");
3277MODULE_LICENSE("GPL"); 3274MODULE_LICENSE("GPL");
3278MODULE_DEVICE_TABLE(pci, efx_pci_table); 3275MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index dbd7b78fe01c..99032581336f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,7 +14,7 @@
14#include "net_driver.h" 14#include "net_driver.h"
15#include "filter.h" 15#include "filter.h"
16 16
17/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ 17/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
18#define EFX_MEM_BAR 2 18#define EFX_MEM_BAR 2
19 19
20/* TX */ 20/* TX */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 229428915aa8..0de8b07c24c2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
251 * @test_index: Starting index of the test 251 * @test_index: Starting index of the test
252 * @strings: Ethtool strings, or %NULL 252 * @strings: Ethtool strings, or %NULL
253 * @data: Ethtool test results, or %NULL 253 * @data: Ethtool test results, or %NULL
254 *
255 * Fill in a block of loopback self-test entries. Return new test
256 * index.
254 */ 257 */
255static int efx_fill_loopback_test(struct efx_nic *efx, 258static int efx_fill_loopback_test(struct efx_nic *efx,
256 struct efx_loopback_self_tests *lb_tests, 259 struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
290 * @tests: Efx self-test results structure, or %NULL 293 * @tests: Efx self-test results structure, or %NULL
291 * @strings: Ethtool strings, or %NULL 294 * @strings: Ethtool strings, or %NULL
292 * @data: Ethtool test results, or %NULL 295 * @data: Ethtool test results, or %NULL
296 *
297 * Get self-test number of strings, strings, and/or test results.
298 * Return number of strings (== number of test results).
299 *
300 * The reason for merging these three functions is to make sure that
301 * they can never be inconsistent.
293 */ 302 */
294static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 303static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
295 struct efx_self_tests *tests, 304 struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
444{ 453{
445 struct efx_nic *efx = netdev_priv(net_dev); 454 struct efx_nic *efx = netdev_priv(net_dev);
446 struct efx_self_tests *efx_tests; 455 struct efx_self_tests *efx_tests;
447 int already_up; 456 bool already_up;
448 int rc = -ENOMEM; 457 int rc = -ENOMEM;
449 458
450 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); 459 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
452 goto fail; 461 goto fail;
453 462
454 if (efx->state != STATE_READY) { 463 if (efx->state != STATE_READY) {
455 rc = -EIO; 464 rc = -EBUSY;
456 goto fail1; 465 goto out;
457 } 466 }
458 467
459 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", 468 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
466 if (rc) { 475 if (rc) {
467 netif_err(efx, drv, efx->net_dev, 476 netif_err(efx, drv, efx->net_dev,
468 "failed opening device.\n"); 477 "failed opening device.\n");
469 goto fail1; 478 goto out;
470 } 479 }
471 } 480 }
472 481
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
479 rc == 0 ? "passed" : "failed", 488 rc == 0 ? "passed" : "failed",
480 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 489 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
481 490
482fail1: 491out:
483 /* Fill ethtool results structures */
484 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); 492 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
485 kfree(efx_tests); 493 kfree(efx_tests);
486fail: 494fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
691 pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); 699 pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
692} 700}
693 701
694
695static void efx_ethtool_get_wol(struct net_device *net_dev, 702static void efx_ethtool_get_wol(struct net_device *net_dev,
696 struct ethtool_wolinfo *wol) 703 struct ethtool_wolinfo *wol)
697{ 704{
@@ -720,7 +727,7 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
720} 727}
721 728
722/* MAC address mask including only I/G bit */ 729/* MAC address mask including only I/G bit */
723static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 730static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
724 731
725#define IP4_ADDR_FULL_MASK ((__force __be32)~0) 732#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
726#define PORT_FULL_MASK ((__force __be16)~0) 733#define PORT_FULL_MASK ((__force __be16)~0)
@@ -780,16 +787,16 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
780 rule->flow_type = ETHER_FLOW; 787 rule->flow_type = ETHER_FLOW;
781 if (spec.match_flags & 788 if (spec.match_flags &
782 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { 789 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
783 memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN); 790 ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
784 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) 791 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
785 memset(mac_mask->h_dest, ~0, ETH_ALEN); 792 eth_broadcast_addr(mac_mask->h_dest);
786 else 793 else
787 memcpy(mac_mask->h_dest, mac_addr_ig_mask, 794 ether_addr_copy(mac_mask->h_dest,
788 ETH_ALEN); 795 mac_addr_ig_mask);
789 } 796 }
790 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { 797 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
791 memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN); 798 ether_addr_copy(mac_entry->h_source, spec.rem_mac);
792 memset(mac_mask->h_source, ~0, ETH_ALEN); 799 eth_broadcast_addr(mac_mask->h_source);
793 } 800 }
794 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 801 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
795 mac_entry->h_proto = spec.ether_type; 802 mac_entry->h_proto = spec.ether_type;
@@ -961,13 +968,13 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
961 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; 968 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
962 else 969 else
963 return -EINVAL; 970 return -EINVAL;
964 memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN); 971 ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
965 } 972 }
966 if (!is_zero_ether_addr(mac_mask->h_source)) { 973 if (!is_zero_ether_addr(mac_mask->h_source)) {
967 if (!is_broadcast_ether_addr(mac_mask->h_source)) 974 if (!is_broadcast_ether_addr(mac_mask->h_source))
968 return -EINVAL; 975 return -EINVAL;
969 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; 976 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
970 memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN); 977 ether_addr_copy(spec.rem_mac, mac_entry->h_source);
971 } 978 }
972 if (mac_mask->h_proto) { 979 if (mac_mask->h_proto) {
973 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) 980 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 18d6f761f4d0..8ec20b713cc6 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
422 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS); 422 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
423} 423}
424 424
425
426static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 425static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
427{ 426{
428 struct efx_nic *efx = dev_id; 427 struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
467 efx_schedule_channel_irq(efx_get_channel(efx, 1)); 466 efx_schedule_channel_irq(efx_get_channel(efx, 1));
468 return IRQ_HANDLED; 467 return IRQ_HANDLED;
469} 468}
469
470/************************************************************************** 470/**************************************************************************
471 * 471 *
472 * RSS 472 * RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1358 case 100: link_speed = 1; break; 1358 case 100: link_speed = 1; break;
1359 default: link_speed = 0; break; 1359 default: link_speed = 0; break;
1360 } 1360 }
1361
1361 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work 1362 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1362 * as advertised. Disable to ensure packets are not 1363 * as advertised. Disable to ensure packets are not
1363 * indefinitely held and TX queue can be flushed at any point 1364 * indefinitely held and TX queue can be flushed at any point
@@ -2182,7 +2183,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2182 } 2183 }
2183 2184
2184 /* Read the MAC addresses */ 2185 /* Read the MAC addresses */
2185 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN); 2186 ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
2186 2187
2187 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", 2188 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
2188 efx->phy_type, efx->mdio.prtad); 2189 efx->phy_type, efx->mdio.prtad);
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
2868 .mcdi_max_ver = -1, 2869 .mcdi_max_ver = -1,
2869 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, 2870 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
2870}; 2871};
2871
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f72489a105ca..a08761360cdf 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
311 */ 311 */
312void efx_farch_tx_write(struct efx_tx_queue *tx_queue) 312void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
313{ 313{
314
315 struct efx_tx_buffer *buffer; 314 struct efx_tx_buffer *buffer;
316 efx_qword_t *txd; 315 efx_qword_t *txd;
317 unsigned write_ptr; 316 unsigned write_ptr;
@@ -1249,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
1249 int tx_packets = 0; 1248 int tx_packets = 0;
1250 int spent = 0; 1249 int spent = 0;
1251 1250
1251 if (budget <= 0)
1252 return spent;
1253
1252 read_ptr = channel->eventq_read_ptr; 1254 read_ptr = channel->eventq_read_ptr;
1253 1255
1254 for (;;) { 1256 for (;;) {
@@ -1609,7 +1611,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1609 return IRQ_HANDLED; 1611 return IRQ_HANDLED;
1610} 1612}
1611 1613
1612
1613/* Setup RSS indirection table. 1614/* Setup RSS indirection table.
1614 * This maps from the hash value of the packet to RXQ 1615 * This maps from the hash value of the packet to RXQ
1615 */ 1616 */
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3ef298d3c47e..d0ed7f71ea7e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -243,7 +243,7 @@ static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
243 } 243 }
244 if (addr != NULL) { 244 if (addr != NULL) {
245 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; 245 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
246 memcpy(spec->loc_mac, addr, ETH_ALEN); 246 ether_addr_copy(spec->loc_mac, addr);
247 } 247 }
248 return 0; 248 return 0;
249} 249}
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index eb59abb57e85..7bd4b14bf3b3 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1187,6 +1187,9 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1187 int rc; 1187 int rc;
1188 1188
1189 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 1189 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
1190 /* we need __aligned(2) for ether_addr_copy */
1191 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
1192 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
1190 1193
1191 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, 1194 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
1192 outbuf, sizeof(outbuf), &outlen); 1195 outbuf, sizeof(outbuf), &outlen);
@@ -1199,11 +1202,10 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1199 } 1202 }
1200 1203
1201 if (mac_address) 1204 if (mac_address)
1202 memcpy(mac_address, 1205 ether_addr_copy(mac_address,
1203 port_num ? 1206 port_num ?
1204 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : 1207 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1205 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0), 1208 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
1206 ETH_ALEN);
1207 if (fw_subtype_list) { 1209 if (fw_subtype_list) {
1208 for (i = 0; 1210 for (i = 0;
1209 i < MCDI_VAR_ARRAY_LEN(outlen, 1211 i < MCDI_VAR_ARRAY_LEN(outlen,
@@ -1532,7 +1534,7 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1532 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); 1534 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1533 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, 1535 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1534 MC_CMD_FILTER_MODE_SIMPLE); 1536 MC_CMD_FILTER_MODE_SIMPLE);
1535 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); 1537 ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
1536 1538
1537 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), 1539 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1538 outbuf, sizeof(outbuf), &outlen); 1540 outbuf, sizeof(outbuf), &outlen);
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 91d23252f8fa..e5fc4e1574b5 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -854,8 +854,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
854 854
855 BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); 855 BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
856 856
857 memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), 857 ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
858 efx->net_dev->dev_addr, ETH_ALEN); 858 efx->net_dev->dev_addr);
859 859
860 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, 860 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
861 EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); 861 EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index af2b8c59a903..8a400a0595eb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
1323 return &rx_queue->buffer[index]; 1323 return &rx_queue->buffer[index];
1324} 1324}
1325 1325
1326
1327/** 1326/**
1328 * EFX_MAX_FRAME_LEN - calculate maximum frame length 1327 * EFX_MAX_FRAME_LEN - calculate maximum frame length
1329 * 1328 *
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b19e3c4..32d969e857f7 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
530 efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); 530 efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
531 *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; 531 *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
532} 532}
533
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index d7a36829649a..6b861e3de4b0 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
223 * @evt_list: List of MC receive events awaiting packets 223 * @evt_list: List of MC receive events awaiting packets
224 * @evt_free_list: List of free events 224 * @evt_free_list: List of free events
225 * @evt_lock: Lock for manipulating evt_list and evt_free_list 225 * @evt_lock: Lock for manipulating evt_list and evt_free_list
226 * @evt_overflow: Boolean indicating that event list has overflowed
227 * @rx_evts: Instantiated events (on evt_list and evt_free_list) 226 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
228 * @workwq: Work queue for processing pending PTP operations 227 * @workwq: Work queue for processing pending PTP operations
229 * @work: Work task 228 * @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
275 struct list_head evt_list; 274 struct list_head evt_list;
276 struct list_head evt_free_list; 275 struct list_head evt_free_list;
277 spinlock_t evt_lock; 276 spinlock_t evt_lock;
278 bool evt_overflow;
279 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; 277 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
280 struct workqueue_struct *workwq; 278 struct workqueue_struct *workwq;
281 struct work_struct work; 279 struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
768 return -EAGAIN; 766 return -EAGAIN;
769 } 767 }
770 768
771 /* Convert the NIC time into kernel time. No correction is required- 769 /* Calculate delay from last good sync (host time) to last_time.
772 * this time is the output of a firmware process. 770 * It is possible that the seconds rolled over between taking
773 */
774 mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
775 ptp->timeset[last_good].minor, 0);
776
777 /* Calculate delay from actual PPS to last_time */
778 delta = ktime_to_timespec(mc_time);
779 delta.tv_nsec +=
780 last_time->ts_real.tv_nsec -
781 (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
782
783 /* It is possible that the seconds rolled over between taking
784 * the start reading and the last value written by the host. The 771 * the start reading and the last value written by the host. The
785 * timescales are such that a gap of more than one second is never 772 * timescales are such that a gap of more than one second is never
786 * expected. 773 * expected. delta is *not* normalised.
787 */ 774 */
788 start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; 775 start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
789 last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK; 776 last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
790 if (start_sec != last_sec) { 777 if (start_sec != last_sec &&
791 if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) { 778 ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
792 netif_warn(efx, hw, efx->net_dev, 779 netif_warn(efx, hw, efx->net_dev,
793 "PTP bad synchronisation seconds\n"); 780 "PTP bad synchronisation seconds\n");
794 return -EAGAIN; 781 return -EAGAIN;
795 } else {
796 delta.tv_sec = 1;
797 }
798 } else {
799 delta.tv_sec = 0;
800 } 782 }
783 delta.tv_sec = (last_sec - start_sec) & 1;
784 delta.tv_nsec =
785 last_time->ts_real.tv_nsec -
786 (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
787
788 /* Convert the NIC time at last good sync into kernel time.
789 * No correction is required - this time is the output of a
790 * firmware process.
791 */
792 mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
793 ptp->timeset[last_good].minor, 0);
794
795 /* Calculate delay from NIC top of second to last_time */
796 delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
801 797
798 /* Set PPS timestamp to match NIC top of second */
802 ptp->host_time_pps = *last_time; 799 ptp->host_time_pps = *last_time;
803 pps_sub_ts(&ptp->host_time_pps, delta); 800 pps_sub_ts(&ptp->host_time_pps, delta);
804 801
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
941 } 938 }
942 } 939 }
943 } 940 }
944 /* If the event overflow flag is set and the event list is now empty
945 * clear the flag to re-enable the overflow warning message.
946 */
947 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
948 ptp->evt_overflow = false;
949 spin_unlock_bh(&ptp->evt_lock); 941 spin_unlock_bh(&ptp->evt_lock);
950} 942}
951 943
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
989 break; 981 break;
990 } 982 }
991 } 983 }
992 /* If the event overflow flag is set and the event list is now empty
993 * clear the flag to re-enable the overflow warning message.
994 */
995 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
996 ptp->evt_overflow = false;
997 spin_unlock_bh(&ptp->evt_lock); 984 spin_unlock_bh(&ptp->evt_lock);
998 985
999 return rc; 986 return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
1147 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 1134 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
1148 list_move(cursor, &efx->ptp_data->evt_free_list); 1135 list_move(cursor, &efx->ptp_data->evt_free_list);
1149 } 1136 }
1150 ptp->evt_overflow = false;
1151 spin_unlock_bh(&efx->ptp_data->evt_lock); 1137 spin_unlock_bh(&efx->ptp_data->evt_lock);
1152 1138
1153 return rc; 1139 return rc;
@@ -1208,6 +1194,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
1208 .n_alarm = 0, 1194 .n_alarm = 0,
1209 .n_ext_ts = 0, 1195 .n_ext_ts = 0,
1210 .n_per_out = 0, 1196 .n_per_out = 0,
1197 .n_pins = 0,
1211 .pps = 1, 1198 .pps = 1,
1212 .adjfreq = efx_phc_adjfreq, 1199 .adjfreq = efx_phc_adjfreq,
1213 .adjtime = efx_phc_adjtime, 1200 .adjtime = efx_phc_adjtime,
@@ -1253,7 +1240,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
1253 spin_lock_init(&ptp->evt_lock); 1240 spin_lock_init(&ptp->evt_lock);
1254 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) 1241 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
1255 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); 1242 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
1256 ptp->evt_overflow = false;
1257 1243
1258 /* Get the NIC PTP attributes and set up time conversions */ 1244 /* Get the NIC PTP attributes and set up time conversions */
1259 rc = efx_ptp_get_attributes(efx); 1245 rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1366,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1380 struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; 1366 struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
1381 u8 *match_data_012, *match_data_345; 1367 u8 *match_data_012, *match_data_345;
1382 unsigned int version; 1368 unsigned int version;
1369 u8 *data;
1383 1370
1384 match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); 1371 match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
1385 1372
@@ -1388,7 +1375,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1388 if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { 1375 if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
1389 return false; 1376 return false;
1390 } 1377 }
1391 version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); 1378 data = skb->data;
1379 version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
1392 if (version != PTP_VERSION_V1) { 1380 if (version != PTP_VERSION_V1) {
1393 return false; 1381 return false;
1394 } 1382 }
@@ -1396,13 +1384,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1396 /* PTP V1 uses all six bytes of the UUID to match the packet 1384 /* PTP V1 uses all six bytes of the UUID to match the packet
1397 * to the timestamp 1385 * to the timestamp
1398 */ 1386 */
1399 match_data_012 = skb->data + PTP_V1_UUID_OFFSET; 1387 match_data_012 = data + PTP_V1_UUID_OFFSET;
1400 match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3; 1388 match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
1401 } else { 1389 } else {
1402 if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { 1390 if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
1403 return false; 1391 return false;
1404 } 1392 }
1405 version = skb->data[PTP_V2_VERSION_OFFSET]; 1393 data = skb->data;
1394 version = data[PTP_V2_VERSION_OFFSET];
1406 if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { 1395 if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
1407 return false; 1396 return false;
1408 } 1397 }
@@ -1414,17 +1403,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1414 * enhanced mode fixes this issue and uses bytes 0-2 1403 * enhanced mode fixes this issue and uses bytes 0-2
1415 * and byte 5-7 of the UUID. 1404 * and byte 5-7 of the UUID.
1416 */ 1405 */
1417 match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5; 1406 match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
1418 if (ptp->mode == MC_CMD_PTP_MODE_V2) { 1407 if (ptp->mode == MC_CMD_PTP_MODE_V2) {
1419 match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2; 1408 match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
1420 } else { 1409 } else {
1421 match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0; 1410 match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
1422 BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); 1411 BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
1423 } 1412 }
1424 } 1413 }
1425 1414
1426 /* Does this packet require timestamping? */ 1415 /* Does this packet require timestamping? */
1427 if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { 1416 if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
1428 match->state = PTP_PACKET_STATE_UNMATCHED; 1417 match->state = PTP_PACKET_STATE_UNMATCHED;
1429 1418
1430 /* We expect the sequence number to be in the same position in 1419 /* We expect the sequence number to be in the same position in
@@ -1440,8 +1429,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1440 (match_data_345[0] << 24)); 1429 (match_data_345[0] << 24));
1441 match->words[1] = (match_data_345[1] | 1430 match->words[1] = (match_data_345[1] |
1442 (match_data_345[2] << 8) | 1431 (match_data_345[2] << 8) |
1443 (skb->data[PTP_V1_SEQUENCE_OFFSET + 1432 (data[PTP_V1_SEQUENCE_OFFSET +
1444 PTP_V1_SEQUENCE_LENGTH - 1] << 1433 PTP_V1_SEQUENCE_LENGTH - 1] <<
1445 16)); 1434 16));
1446 } else { 1435 } else {
1447 match->state = PTP_PACKET_STATE_MATCH_UNWANTED; 1436 match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1624,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
1635 list_add_tail(&evt->link, &ptp->evt_list); 1624 list_add_tail(&evt->link, &ptp->evt_list);
1636 1625
1637 queue_work(ptp->workwq, &ptp->work); 1626 queue_work(ptp->workwq, &ptp->work);
1638 } else if (!ptp->evt_overflow) { 1627 } else if (net_ratelimit()) {
1639 /* Log a warning message and set the event overflow flag. 1628 /* Log a rate-limited warning message. */
1640 * The message won't be logged again until the event queue
1641 * becomes empty.
1642 */
1643 netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); 1629 netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
1644 ptp->evt_overflow = true;
1645 } 1630 }
1646 spin_unlock_bh(&ptp->evt_lock); 1631 spin_unlock_bh(&ptp->evt_lock);
1647} 1632}
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 26641817a9c7..0fc5baef45b1 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -50,7 +50,7 @@ struct efx_loopback_payload {
50} __packed; 50} __packed;
51 51
52/* Loopback test source MAC address */ 52/* Loopback test source MAC address */
53static const unsigned char payload_source[ETH_ALEN] = { 53static const u8 payload_source[ETH_ALEN] __aligned(2) = {
54 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, 54 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
55}; 55};
56 56
@@ -366,8 +366,8 @@ static void efx_iterate_state(struct efx_nic *efx)
366 struct efx_loopback_payload *payload = &state->payload; 366 struct efx_loopback_payload *payload = &state->payload;
367 367
368 /* Initialise the layerII header */ 368 /* Initialise the layerII header */
369 memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); 369 ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
370 memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); 370 ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
371 payload->header.h_proto = htons(ETH_P_IP); 371 payload->header.h_proto = htons(ETH_P_IP);
372 372
373 /* saddr set later and used as incrementing count */ 373 /* saddr set later and used as incrementing count */
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 0c38f926871e..9a9205e77896 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1095,7 +1095,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
1095 1095
1096 /* Fill the remaining addresses */ 1096 /* Fill the remaining addresses */
1097 list_for_each_entry(local_addr, &efx->local_addr_list, link) { 1097 list_for_each_entry(local_addr, &efx->local_addr_list, link) {
1098 memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN); 1098 ether_addr_copy(peer->mac_addr, local_addr->addr);
1099 peer->tci = 0; 1099 peer->tci = 0;
1100 ++peer; 1100 ++peer;
1101 ++peer_count; 1101 ++peer_count;
@@ -1303,8 +1303,7 @@ int efx_sriov_init(struct efx_nic *efx)
1303 goto fail_vfs; 1303 goto fail_vfs;
1304 1304
1305 rtnl_lock(); 1305 rtnl_lock();
1306 memcpy(vfdi_status->peers[0].mac_addr, 1306 ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
1307 net_dev->dev_addr, ETH_ALEN);
1308 efx->vf_init_count = efx->vf_count; 1307 efx->vf_init_count = efx->vf_count;
1309 rtnl_unlock(); 1308 rtnl_unlock();
1310 1309
@@ -1452,8 +1451,8 @@ void efx_sriov_mac_address_changed(struct efx_nic *efx)
1452 1451
1453 if (!efx->vf_init_count) 1452 if (!efx->vf_init_count)
1454 return; 1453 return;
1455 memcpy(vfdi_status->peers[0].mac_addr, 1454 ether_addr_copy(vfdi_status->peers[0].mac_addr,
1456 efx->net_dev->dev_addr, ETH_ALEN); 1455 efx->net_dev->dev_addr);
1457 queue_work(vfdi_workqueue, &efx->peer_work); 1456 queue_work(vfdi_workqueue, &efx->peer_work);
1458} 1457}
1459 1458
@@ -1570,7 +1569,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
1570 vf = efx->vf + vf_i; 1569 vf = efx->vf + vf_i;
1571 1570
1572 mutex_lock(&vf->status_lock); 1571 mutex_lock(&vf->status_lock);
1573 memcpy(vf->addr.mac_addr, mac, ETH_ALEN); 1572 ether_addr_copy(vf->addr.mac_addr, mac);
1574 __efx_sriov_update_vf_addr(vf); 1573 __efx_sriov_update_vf_addr(vf);
1575 mutex_unlock(&vf->status_lock); 1574 mutex_unlock(&vf->status_lock);
1576 1575
@@ -1633,7 +1632,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1633 vf = efx->vf + vf_i; 1632 vf = efx->vf + vf_i;
1634 1633
1635 ivi->vf = vf_i; 1634 ivi->vf = vf_i;
1636 memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN); 1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr);
1637 ivi->tx_rate = 0; 1636 ivi->tx_rate = 0;
1638 tci = ntohs(vf->addr.tci); 1637 tci = ntohs(vf->addr.tci);
1639 ivi->vlan = tci & VLAN_VID_MASK; 1638 ivi->vlan = tci & VLAN_VID_MASK;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa4eb0a..fa9475300411 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
787 * Requires TX checksum offload support. 787 * Requires TX checksum offload support.
788 */ 788 */
789 789
790/* Number of bytes inserted at the start of a TSO header buffer,
791 * similar to NET_IP_ALIGN.
792 */
793#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
794#define TSOH_OFFSET 0
795#else
796#define TSOH_OFFSET NET_IP_ALIGN
797#endif
798
799#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 790#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
800 791
801/** 792/**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
882 EFX_BUG_ON_PARANOID(buffer->flags); 873 EFX_BUG_ON_PARANOID(buffer->flags);
883 EFX_BUG_ON_PARANOID(buffer->unmap_len); 874 EFX_BUG_ON_PARANOID(buffer->unmap_len);
884 875
885 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) { 876 if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
886 unsigned index = 877 unsigned index =
887 (tx_queue->insert_count & tx_queue->ptr_mask) / 2; 878 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
888 struct efx_buffer *page_buf = 879 struct efx_buffer *page_buf =
889 &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; 880 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
890 unsigned offset = 881 unsigned offset =
891 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; 882 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
892 883
893 if (unlikely(!page_buf->addr) && 884 if (unlikely(!page_buf->addr) &&
894 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, 885 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
901 } else { 892 } else {
902 tx_queue->tso_long_headers++; 893 tx_queue->tso_long_headers++;
903 894
904 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC); 895 buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
905 if (unlikely(!buffer->heap_buf)) 896 if (unlikely(!buffer->heap_buf))
906 return NULL; 897 return NULL;
907 result = (u8 *)buffer->heap_buf + TSOH_OFFSET; 898 result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
908 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; 899 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
909 } 900 }
910 901
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
1011static int tso_start(struct tso_state *st, struct efx_nic *efx, 1002static int tso_start(struct tso_state *st, struct efx_nic *efx,
1012 const struct sk_buff *skb) 1003 const struct sk_buff *skb)
1013{ 1004{
1014 bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; 1005 bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
1015 struct device *dma_dev = &efx->pci_dev->dev; 1006 struct device *dma_dev = &efx->pci_dev->dev;
1016 unsigned int header_len, in_len; 1007 unsigned int header_len, in_len;
1017 dma_addr_t dma_addr; 1008 dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
1037 1028
1038 st->out_len = skb->len - header_len; 1029 st->out_len = skb->len - header_len;
1039 1030
1040 if (!use_options) { 1031 if (!use_opt_desc) {
1041 st->header_unmap_len = 0; 1032 st->header_unmap_len = 0;
1042 1033
1043 if (likely(in_len == 0)) { 1034 if (likely(in_len == 0)) {
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 5eb933c97bba..7daa7d433099 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -987,7 +987,7 @@ out_unlock:
987 spin_unlock(&priv->lock); 987 spin_unlock(&priv->lock);
988 988
989out: 989out:
990 dev_kfree_skb(skb); 990 dev_consume_skb_any(skb);
991 991
992 return NETDEV_TX_OK; 992 return NETDEV_TX_OK;
993} 993}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ff57a46388ee..6072f093e6b4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1614 skb->data, skb->len, PCI_DMA_TODEVICE); 1614 skb->data, skb->len, PCI_DMA_TODEVICE);
1615 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, 1615 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1616 sis_priv->tx_ring[entry].bufptr))) { 1616 sis_priv->tx_ring[entry].bufptr))) {
1617 dev_kfree_skb(skb); 1617 dev_kfree_skb_any(skb);
1618 sis_priv->tx_skbuff[entry] = NULL; 1618 sis_priv->tx_skbuff[entry] = NULL;
1619 net_dev->stats.tx_dropped++; 1619 net_dev->stats.tx_dropped++;
1620 spin_unlock_irqrestore(&sis_priv->lock, flags); 1620 spin_unlock_irqrestore(&sis_priv->lock, flags);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index c50fb08c9905..66b05e62f70a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
551 dev->stats.tx_errors++; 551 dev->stats.tx_errors++;
552 dev->stats.tx_dropped++; 552 dev->stats.tx_dropped++;
553 spin_unlock_irqrestore(&lp->lock, flags); 553 spin_unlock_irqrestore(&lp->lock, flags);
554 dev_kfree_skb(skb); 554 dev_kfree_skb_any(skb);
555 return NETDEV_TX_OK; 555 return NETDEV_TX_OK;
556 } 556 }
557 557
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 839c0e6cca01..d1b4dca53a9d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data)
621done: if (!THROTTLE_TX_PKTS) 621done: if (!THROTTLE_TX_PKTS)
622 netif_wake_queue(dev); 622 netif_wake_queue(dev);
623 623
624 dev_kfree_skb(skb); 624 dev_consume_skb_any(skb);
625} 625}
626 626
627/* 627/*
@@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
657 netdev_warn(dev, "Far too big packet error.\n"); 657 netdev_warn(dev, "Far too big packet error.\n");
658 dev->stats.tx_errors++; 658 dev->stats.tx_errors++;
659 dev->stats.tx_dropped++; 659 dev->stats.tx_dropped++;
660 dev_kfree_skb(skb); 660 dev_kfree_skb_any(skb);
661 return NETDEV_TX_OK; 661 return NETDEV_TX_OK;
662 } 662 }
663 663
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 6382b7c416f4..a0fc151da40d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -439,7 +439,8 @@ static int smsc911x_request_resources(struct platform_device *pdev)
439 /* Request clock */ 439 /* Request clock */
440 pdata->clk = clk_get(&pdev->dev, NULL); 440 pdata->clk = clk_get(&pdev->dev, NULL);
441 if (IS_ERR(pdata->clk)) 441 if (IS_ERR(pdata->clk))
442 netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk)); 442 dev_dbg(&pdev->dev, "couldn't get clock %li\n",
443 PTR_ERR(pdata->clk));
443 444
444 return ret; 445 return ret;
445} 446}
@@ -1672,7 +1673,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1672 pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); 1673 pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
1673 freespace -= (skb->len + 32); 1674 freespace -= (skb->len + 32);
1674 skb_tx_timestamp(skb); 1675 skb_tx_timestamp(skb);
1675 dev_kfree_skb(skb); 1676 dev_consume_skb_any(skb);
1676 1677
1677 if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) 1678 if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
1678 smsc911x_tx_update_txcounters(dev); 1679 smsc911x_tx_update_txcounters(dev);
@@ -2379,8 +2380,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2379 int res_size, irq_flags; 2380 int res_size, irq_flags;
2380 int retval; 2381 int retval;
2381 2382
2382 pr_info("Driver version %s\n", SMSC_DRV_VERSION);
2383
2384 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2383 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2385 "smsc911x-memory"); 2384 "smsc911x-memory");
2386 if (!res) 2385 if (!res)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f2d7c702c77f..2d09c116cbc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -26,6 +26,16 @@ config STMMAC_PLATFORM
26 26
27 If unsure, say N. 27 If unsure, say N.
28 28
29config DWMAC_SOCFPGA
30 bool "SOCFPGA dwmac support"
31 depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
32 help
33 Support for ethernet controller on Altera SOCFPGA
34
35 This selects the Altera SOCFPGA SoC glue layer support
36 for the stmmac device driver. This driver is used for
37 arria5 and cyclone5 FPGA SoCs.
38
29config DWMAC_SUNXI 39config DWMAC_SUNXI
30 bool "Allwinner GMAC support" 40 bool "Allwinner GMAC support"
31 depends on STMMAC_PLATFORM && ARCH_SUNXI 41 depends on STMMAC_PLATFORM && ARCH_SUNXI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index dcef28775dad..18695ebef7e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -3,6 +3,7 @@ stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
4stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o 4stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
5stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o 5stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
6stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
6stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 7stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
7 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 8 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 9 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
new file mode 100644
index 000000000000..fd8a217556a1
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -0,0 +1,130 @@
1/* Copyright Altera Corporation (C) 2014. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License, version 2,
5 * as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14 *
15 * Adopted from dwmac-sti.c
16 */
17
18#include <linux/mfd/syscon.h>
19#include <linux/of.h>
20#include <linux/of_net.h>
21#include <linux/phy.h>
22#include <linux/regmap.h>
23#include <linux/stmmac.h>
24
25#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
26#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
27#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
28#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
29#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
30
31struct socfpga_dwmac {
32 int interface;
33 u32 reg_offset;
34 u32 reg_shift;
35 struct device *dev;
36 struct regmap *sys_mgr_base_addr;
37};
38
39static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
40{
41 struct device_node *np = dev->of_node;
42 struct regmap *sys_mgr_base_addr;
43 u32 reg_offset, reg_shift;
44 int ret;
45
46 dwmac->interface = of_get_phy_mode(np);
47
48 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
49 if (IS_ERR(sys_mgr_base_addr)) {
50 dev_info(dev, "No sysmgr-syscon node found\n");
51 return PTR_ERR(sys_mgr_base_addr);
52 }
53
54 ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &reg_offset);
55 if (ret) {
56 dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n");
57 return -EINVAL;
58 }
59
60 ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &reg_shift);
61 if (ret) {
62 dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n");
63 return -EINVAL;
64 }
65
66 dwmac->reg_offset = reg_offset;
67 dwmac->reg_shift = reg_shift;
68 dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
69 dwmac->dev = dev;
70
71 return 0;
72}
73
74static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
75{
76 struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
77 int phymode = dwmac->interface;
78 u32 reg_offset = dwmac->reg_offset;
79 u32 reg_shift = dwmac->reg_shift;
80 u32 ctrl, val;
81
82 switch (phymode) {
83 case PHY_INTERFACE_MODE_RGMII:
84 val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
85 break;
86 case PHY_INTERFACE_MODE_MII:
87 case PHY_INTERFACE_MODE_GMII:
88 val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
89 break;
90 default:
91 dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
92 return -EINVAL;
93 }
94
95 regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
96 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
97 ctrl |= val << reg_shift;
98
99 regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
100 return 0;
101}
102
103static void *socfpga_dwmac_probe(struct platform_device *pdev)
104{
105 struct device *dev = &pdev->dev;
106 int ret;
107 struct socfpga_dwmac *dwmac;
108
109 dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
110 if (!dwmac)
111 return ERR_PTR(-ENOMEM);
112
113 ret = socfpga_dwmac_parse_data(dwmac, dev);
114 if (ret) {
115 dev_err(dev, "Unable to parse OF data\n");
116 return ERR_PTR(ret);
117 }
118
119 ret = socfpga_dwmac_setup(dwmac);
120 if (ret) {
121 dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
122 return ERR_PTR(ret);
123 }
124
125 return dwmac;
126}
127
128const struct stmmac_of_data socfpga_gmac_data = {
129 .setup = socfpga_dwmac_probe,
130};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f9e60d7918c4..ca01035634a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -136,6 +136,9 @@ extern const struct stmmac_of_data sun7i_gmac_data;
136#ifdef CONFIG_DWMAC_STI 136#ifdef CONFIG_DWMAC_STI
137extern const struct stmmac_of_data sti_gmac_data; 137extern const struct stmmac_of_data sti_gmac_data;
138#endif 138#endif
139#ifdef CONFIG_DWMAC_SOCFPGA
140extern const struct stmmac_of_data socfpga_gmac_data;
141#endif
139extern struct platform_driver stmmac_pltfr_driver; 142extern struct platform_driver stmmac_pltfr_driver;
140static inline int stmmac_register_platform(void) 143static inline int stmmac_register_platform(void)
141{ 144{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8543e1cfd55e..d940034acdd4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1303,7 +1303,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1303 priv->hw->mode->clean_desc3(priv, p); 1303 priv->hw->mode->clean_desc3(priv, p);
1304 1304
1305 if (likely(skb != NULL)) { 1305 if (likely(skb != NULL)) {
1306 dev_kfree_skb(skb); 1306 dev_consume_skb_any(skb);
1307 priv->tx_skbuff[entry] = NULL; 1307 priv->tx_skbuff[entry] = NULL;
1308 } 1308 }
1309 1309
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8fb32a80f1c1..46aef5108bea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -38,6 +38,9 @@ static const struct of_device_id stmmac_dt_ids[] = {
38 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, 38 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
39 { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, 39 { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
40#endif 40#endif
41#ifdef CONFIG_DWMAC_SOCFPGA
42 { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
43#endif
41 /* SoC specific glue layers should come before generic bindings */ 44 /* SoC specific glue layers should come before generic bindings */
42 { .compatible = "st,spear600-gmac"}, 45 { .compatible = "st,spear600-gmac"},
43 { .compatible = "snps,dwmac-3.610"}, 46 { .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 7680581ebe12..b7ad3565566c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -164,6 +164,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
164 .n_alarm = 0, 164 .n_alarm = 0,
165 .n_ext_ts = 0, 165 .n_ext_ts = 0,
166 .n_per_out = 0, 166 .n_per_out = 0,
167 .n_pins = 0,
167 .pps = 0, 168 .pps = 0,
168 .adjfreq = stmmac_adjust_freq, 169 .adjfreq = stmmac_adjust_freq,
169 .adjtime = stmmac_adjust_time, 170 .adjtime = stmmac_adjust_time,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8e2266e1f260..79606f47a08e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9041 struct msix_entry msi_vec[NIU_NUM_LDG]; 9041 struct msix_entry msi_vec[NIU_NUM_LDG];
9042 struct niu_parent *parent = np->parent; 9042 struct niu_parent *parent = np->parent;
9043 struct pci_dev *pdev = np->pdev; 9043 struct pci_dev *pdev = np->pdev;
9044 int i, num_irqs, err; 9044 int i, num_irqs;
9045 u8 first_ldg; 9045 u8 first_ldg;
9046 9046
9047 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 9047 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9053 (np->port == 0 ? 3 : 1)); 9053 (np->port == 0 ? 3 : 1));
9054 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 9054 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
9055 9055
9056retry:
9057 for (i = 0; i < num_irqs; i++) { 9056 for (i = 0; i < num_irqs; i++) {
9058 msi_vec[i].vector = 0; 9057 msi_vec[i].vector = 0;
9059 msi_vec[i].entry = i; 9058 msi_vec[i].entry = i;
9060 } 9059 }
9061 9060
9062 err = pci_enable_msix(pdev, msi_vec, num_irqs); 9061 num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
9063 if (err < 0) { 9062 if (num_irqs < 0) {
9064 np->flags &= ~NIU_FLAGS_MSIX; 9063 np->flags &= ~NIU_FLAGS_MSIX;
9065 return; 9064 return;
9066 } 9065 }
9067 if (err > 0) {
9068 num_irqs = err;
9069 goto retry;
9070 }
9071 9066
9072 np->flags |= NIU_FLAGS_MSIX; 9067 np->flags |= NIU_FLAGS_MSIX;
9073 for (i = 0; i < num_irqs; i++) 9068 for (i = 0; i < num_irqs; i++)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index c2799dc46325..102a66fc54a2 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
688 } 688 }
689 689
690 dev->stats.tx_packets++; 690 dev->stats.tx_packets++;
691 dev_kfree_skb(skb); 691 dev_consume_skb_any(skb);
692 } 692 }
693 gp->tx_old = entry; 693 gp->tx_old = entry;
694 694
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d6d8ec676c8..5d5fec6c4eb0 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -378,7 +378,6 @@ struct cpsw_priv {
378 u32 version; 378 u32 version;
379 u32 coal_intvl; 379 u32 coal_intvl;
380 u32 bus_freq_mhz; 380 u32 bus_freq_mhz;
381 struct net_device_stats stats;
382 int rx_packet_max; 381 int rx_packet_max;
383 int host_port; 382 int host_port;
384 struct clk *clk; 383 struct clk *clk;
@@ -673,8 +672,8 @@ static void cpsw_tx_handler(void *token, int len, int status)
673 if (unlikely(netif_queue_stopped(ndev))) 672 if (unlikely(netif_queue_stopped(ndev)))
674 netif_wake_queue(ndev); 673 netif_wake_queue(ndev);
675 cpts_tx_timestamp(priv->cpts, skb); 674 cpts_tx_timestamp(priv->cpts, skb);
676 priv->stats.tx_packets++; 675 ndev->stats.tx_packets++;
677 priv->stats.tx_bytes += len; 676 ndev->stats.tx_bytes += len;
678 dev_kfree_skb_any(skb); 677 dev_kfree_skb_any(skb);
679} 678}
680 679
@@ -700,10 +699,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
700 cpts_rx_timestamp(priv->cpts, skb); 699 cpts_rx_timestamp(priv->cpts, skb);
701 skb->protocol = eth_type_trans(skb, ndev); 700 skb->protocol = eth_type_trans(skb, ndev);
702 netif_receive_skb(skb); 701 netif_receive_skb(skb);
703 priv->stats.rx_bytes += len; 702 ndev->stats.rx_bytes += len;
704 priv->stats.rx_packets++; 703 ndev->stats.rx_packets++;
705 } else { 704 } else {
706 priv->stats.rx_dropped++; 705 ndev->stats.rx_dropped++;
707 new_skb = skb; 706 new_skb = skb;
708 } 707 }
709 708
@@ -1313,7 +1312,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1313 1312
1314 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { 1313 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
1315 cpsw_err(priv, tx_err, "packet pad failed\n"); 1314 cpsw_err(priv, tx_err, "packet pad failed\n");
1316 priv->stats.tx_dropped++; 1315 ndev->stats.tx_dropped++;
1317 return NETDEV_TX_OK; 1316 return NETDEV_TX_OK;
1318 } 1317 }
1319 1318
@@ -1337,7 +1336,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1337 1336
1338 return NETDEV_TX_OK; 1337 return NETDEV_TX_OK;
1339fail: 1338fail:
1340 priv->stats.tx_dropped++; 1339 ndev->stats.tx_dropped++;
1341 netif_stop_queue(ndev); 1340 netif_stop_queue(ndev);
1342 return NETDEV_TX_BUSY; 1341 return NETDEV_TX_BUSY;
1343} 1342}
@@ -1477,7 +1476,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1477static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 1476static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1478{ 1477{
1479 struct cpsw_priv *priv = netdev_priv(dev); 1478 struct cpsw_priv *priv = netdev_priv(dev);
1480 struct mii_ioctl_data *data = if_mii(req);
1481 int slave_no = cpsw_slave_index(priv); 1479 int slave_no = cpsw_slave_index(priv);
1482 1480
1483 if (!netif_running(dev)) 1481 if (!netif_running(dev))
@@ -1490,14 +1488,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1490 case SIOCGHWTSTAMP: 1488 case SIOCGHWTSTAMP:
1491 return cpsw_hwtstamp_get(dev, req); 1489 return cpsw_hwtstamp_get(dev, req);
1492#endif 1490#endif
1493 case SIOCGMIIPHY:
1494 data->phy_id = priv->slaves[slave_no].phy->addr;
1495 break;
1496 default:
1497 return -ENOTSUPP;
1498 } 1491 }
1499 1492
1500 return 0; 1493 if (!priv->slaves[slave_no].phy)
1494 return -EOPNOTSUPP;
1495 return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
1501} 1496}
1502 1497
1503static void cpsw_ndo_tx_timeout(struct net_device *ndev) 1498static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1505,7 +1500,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1505 struct cpsw_priv *priv = netdev_priv(ndev); 1500 struct cpsw_priv *priv = netdev_priv(ndev);
1506 1501
1507 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); 1502 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1508 priv->stats.tx_errors++; 1503 ndev->stats.tx_errors++;
1509 cpsw_intr_disable(priv); 1504 cpsw_intr_disable(priv);
1510 cpdma_ctlr_int_ctrl(priv->dma, false); 1505 cpdma_ctlr_int_ctrl(priv->dma, false);
1511 cpdma_chan_stop(priv->txch); 1506 cpdma_chan_stop(priv->txch);
@@ -1544,12 +1539,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1544 return 0; 1539 return 0;
1545} 1540}
1546 1541
1547static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
1548{
1549 struct cpsw_priv *priv = netdev_priv(ndev);
1550 return &priv->stats;
1551}
1552
1553#ifdef CONFIG_NET_POLL_CONTROLLER 1542#ifdef CONFIG_NET_POLL_CONTROLLER
1554static void cpsw_ndo_poll_controller(struct net_device *ndev) 1543static void cpsw_ndo_poll_controller(struct net_device *ndev)
1555{ 1544{
@@ -1642,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
1642 .ndo_validate_addr = eth_validate_addr, 1631 .ndo_validate_addr = eth_validate_addr,
1643 .ndo_change_mtu = eth_change_mtu, 1632 .ndo_change_mtu = eth_change_mtu,
1644 .ndo_tx_timeout = cpsw_ndo_tx_timeout, 1633 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
1645 .ndo_get_stats = cpsw_ndo_get_stats,
1646 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, 1634 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
1647#ifdef CONFIG_NET_POLL_CONTROLLER 1635#ifdef CONFIG_NET_POLL_CONTROLLER
1648 .ndo_poll_controller = cpsw_ndo_poll_controller, 1636 .ndo_poll_controller = cpsw_ndo_poll_controller,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 8c351f100aca..a3bbf59eaafd 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,10 +31,6 @@
31 31
32#ifdef CONFIG_TI_CPTS 32#ifdef CONFIG_TI_CPTS
33 33
34static struct sock_filter ptp_filter[] = {
35 PTP_FILTER
36};
37
38#define cpts_read32(c, r) __raw_readl(&c->reg->r) 34#define cpts_read32(c, r) __raw_readl(&c->reg->r)
39#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r) 35#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
40 36
@@ -217,6 +213,7 @@ static struct ptp_clock_info cpts_info = {
217 .name = "CTPS timer", 213 .name = "CTPS timer",
218 .max_adj = 1000000, 214 .max_adj = 1000000,
219 .n_ext_ts = 0, 215 .n_ext_ts = 0,
216 .n_pins = 0,
220 .pps = 0, 217 .pps = 0,
221 .adjfreq = cpts_ptp_adjfreq, 218 .adjfreq = cpts_ptp_adjfreq,
222 .adjtime = cpts_ptp_adjtime, 219 .adjtime = cpts_ptp_adjtime,
@@ -300,7 +297,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
300 u64 ns = 0; 297 u64 ns = 0;
301 struct cpts_event *event; 298 struct cpts_event *event;
302 struct list_head *this, *next; 299 struct list_head *this, *next;
303 unsigned int class = sk_run_filter(skb, ptp_filter); 300 unsigned int class = ptp_classify_raw(skb);
304 unsigned long flags; 301 unsigned long flags;
305 u16 seqid; 302 u16 seqid;
306 u8 mtype; 303 u8 mtype;
@@ -371,10 +368,6 @@ int cpts_register(struct device *dev, struct cpts *cpts,
371 int err, i; 368 int err, i;
372 unsigned long flags; 369 unsigned long flags;
373 370
374 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
375 pr_err("cpts: bad ptp filter\n");
376 return -EINVAL;
377 }
378 cpts->info = cpts_info; 371 cpts->info = cpts_info;
379 cpts->clock = ptp_clock_register(&cpts->info, dev); 372 cpts->clock = ptp_clock_register(&cpts->info, dev);
380 if (IS_ERR(cpts->clock)) { 373 if (IS_ERR(cpts->clock)) {
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 17503da9f7a5..7e1c91d41a87 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
659 struct info_mpipe *info_mpipe = 659 struct info_mpipe *info_mpipe =
660 container_of(napi, struct info_mpipe, napi); 660 container_of(napi, struct info_mpipe, napi);
661 661
662 if (budget <= 0)
663 goto done;
664
662 instance = info_mpipe->instance; 665 instance = info_mpipe->instance;
663 while ((n = gxio_mpipe_iqueue_try_peek( 666 while ((n = gxio_mpipe_iqueue_try_peek(
664 &info_mpipe->iqueue, 667 &info_mpipe->iqueue,
@@ -870,6 +873,7 @@ static struct ptp_clock_info ptp_mpipe_caps = {
870 .name = "mPIPE clock", 873 .name = "mPIPE clock",
871 .max_adj = 999999999, 874 .max_adj = 999999999,
872 .n_ext_ts = 0, 875 .n_ext_ts = 0,
876 .n_pins = 0,
873 .pps = 0, 877 .pps = 0,
874 .adjfreq = ptp_mpipe_adjfreq, 878 .adjfreq = ptp_mpipe_adjfreq,
875 .adjtime = ptp_mpipe_adjtime, 879 .adjtime = ptp_mpipe_adjtime,
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index edb2e12a0fe2..e5a5c5d4ce0c 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
831 831
832 unsigned int work = 0; 832 unsigned int work = 0;
833 833
834 if (budget <= 0)
835 goto done;
836
834 while (priv->active) { 837 while (priv->active) {
835 int index = qup->__packet_receive_read; 838 int index = qup->__packet_receive_read;
836 if (index == qsp->__packet_receive_queue.__packet_write) 839 if (index == qsp->__packet_receive_queue.__packet_write)
@@ -1821,7 +1824,7 @@ busy:
1821 1824
1822 /* Handle completions. */ 1825 /* Handle completions. */
1823 for (i = 0; i < nolds; i++) 1826 for (i = 0; i < nolds; i++)
1824 kfree_skb(olds[i]); 1827 dev_consume_skb_any(olds[i]);
1825 1828
1826 /* Update stats. */ 1829 /* Update stats. */
1827 u64_stats_update_begin(&stats->syncp); 1830 u64_stats_update_begin(&stats->syncp);
@@ -2005,7 +2008,7 @@ busy:
2005 2008
2006 /* Handle completions. */ 2009 /* Handle completions. */
2007 for (i = 0; i < nolds; i++) 2010 for (i = 0; i < nolds; i++)
2008 kfree_skb(olds[i]); 2011 dev_consume_skb_any(olds[i]);
2009 2012
2010 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ 2013 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
2011 u64_stats_update_begin(&stats->syncp); 2014 u64_stats_update_begin(&stats->syncp);
@@ -2068,14 +2071,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
2068 cpu_stats = &priv->cpu[i]->stats; 2071 cpu_stats = &priv->cpu[i]->stats;
2069 2072
2070 do { 2073 do {
2071 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); 2074 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2072 trx_packets = cpu_stats->rx_packets; 2075 trx_packets = cpu_stats->rx_packets;
2073 ttx_packets = cpu_stats->tx_packets; 2076 ttx_packets = cpu_stats->tx_packets;
2074 trx_bytes = cpu_stats->rx_bytes; 2077 trx_bytes = cpu_stats->rx_bytes;
2075 ttx_bytes = cpu_stats->tx_bytes; 2078 ttx_bytes = cpu_stats->tx_bytes;
2076 trx_errors = cpu_stats->rx_errors; 2079 trx_errors = cpu_stats->rx_errors;
2077 trx_dropped = cpu_stats->rx_dropped; 2080 trx_dropped = cpu_stats->rx_dropped;
2078 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); 2081 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2079 2082
2080 rx_packets += trx_packets; 2083 rx_packets += trx_packets;
2081 tx_packets += ttx_packets; 2084 tx_packets += ttx_packets;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3f4a32e39d27..0282d0161859 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
860 if (skb) { 860 if (skb) {
861 pci_unmap_single(card->pdev, buf_addr, skb->len, 861 pci_unmap_single(card->pdev, buf_addr, skb->len,
862 PCI_DMA_TODEVICE); 862 PCI_DMA_TODEVICE);
863 dev_kfree_skb(skb); 863 dev_consume_skb_any(skb);
864 } 864 }
865 } 865 }
866 return 0; 866 return 0;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 88e9c73cebc0..fef5573dbfca 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1645 int received = 0, handled; 1645 int received = 0, handled;
1646 u32 status; 1646 u32 status;
1647 1647
1648 if (budget <= 0)
1649 return received;
1650
1648 spin_lock(&lp->rx_lock); 1651 spin_lock(&lp->rx_lock);
1649 status = tc_readl(&tr->Int_Src); 1652 status = tc_readl(&tr->Int_Src);
1650 do { 1653 do {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 6ac20a6738f4..f61dc2b72bb2 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1022,7 +1022,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1022 1022
1023 /* The chip-specific entries in the device structure. */ 1023 /* The chip-specific entries in the device structure. */
1024 dev->netdev_ops = &rhine_netdev_ops; 1024 dev->netdev_ops = &rhine_netdev_ops;
1025 dev->ethtool_ops = &netdev_ethtool_ops, 1025 dev->ethtool_ops = &netdev_ethtool_ops;
1026 dev->watchdog_timeo = TX_TIMEOUT; 1026 dev->watchdog_timeo = TX_TIMEOUT;
1027 1027
1028 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); 1028 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
@@ -1678,7 +1678,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1678 /* Must use alignment buffer. */ 1678 /* Must use alignment buffer. */
1679 if (skb->len > PKT_BUF_SZ) { 1679 if (skb->len > PKT_BUF_SZ) {
1680 /* packet too long, drop it */ 1680 /* packet too long, drop it */
1681 dev_kfree_skb(skb); 1681 dev_kfree_skb_any(skb);
1682 rp->tx_skbuff[entry] = NULL; 1682 rp->tx_skbuff[entry] = NULL;
1683 dev->stats.tx_dropped++; 1683 dev->stats.tx_dropped++;
1684 return NETDEV_TX_OK; 1684 return NETDEV_TX_OK;
@@ -1698,7 +1698,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1698 pci_map_single(rp->pdev, skb->data, skb->len, 1698 pci_map_single(rp->pdev, skb->data, skb->len,
1699 PCI_DMA_TODEVICE); 1699 PCI_DMA_TODEVICE);
1700 if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { 1700 if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
1701 dev_kfree_skb(skb); 1701 dev_kfree_skb_any(skb);
1702 rp->tx_skbuff_dma[entry] = 0; 1702 rp->tx_skbuff_dma[entry] = 0;
1703 dev->stats.tx_dropped++; 1703 dev->stats.tx_dropped++;
1704 return NETDEV_TX_OK; 1704 return NETDEV_TX_OK;
@@ -1836,7 +1836,7 @@ static void rhine_tx(struct net_device *dev)
1836 rp->tx_skbuff[entry]->len, 1836 rp->tx_skbuff[entry]->len,
1837 PCI_DMA_TODEVICE); 1837 PCI_DMA_TODEVICE);
1838 } 1838 }
1839 dev_kfree_skb(rp->tx_skbuff[entry]); 1839 dev_consume_skb_any(rp->tx_skbuff[entry]);
1840 rp->tx_skbuff[entry] = NULL; 1840 rp->tx_skbuff[entry] = NULL;
1841 entry = (++rp->dirty_tx) % TX_RING_SIZE; 1841 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1842 } 1842 }
@@ -2072,16 +2072,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2072 netdev_stats_to_stats64(stats, &dev->stats); 2072 netdev_stats_to_stats64(stats, &dev->stats);
2073 2073
2074 do { 2074 do {
2075 start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); 2075 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2076 stats->rx_packets = rp->rx_stats.packets; 2076 stats->rx_packets = rp->rx_stats.packets;
2077 stats->rx_bytes = rp->rx_stats.bytes; 2077 stats->rx_bytes = rp->rx_stats.bytes;
2078 } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); 2078 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2079 2079
2080 do { 2080 do {
2081 start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); 2081 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2082 stats->tx_packets = rp->tx_stats.packets; 2082 stats->tx_packets = rp->tx_stats.packets;
2083 stats->tx_bytes = rp->tx_stats.bytes; 2083 stats->tx_bytes = rp->tx_stats.bytes;
2084 } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); 2084 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2085 2085
2086 return stats; 2086 return stats;
2087} 2087}
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ad61d26a44f3..de08e86db209 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2565 /* The hardware can handle at most 7 memory segments, so merge 2565 /* The hardware can handle at most 7 memory segments, so merge
2566 * the skb if there are more */ 2566 * the skb if there are more */
2567 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { 2567 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2568 kfree_skb(skb); 2568 dev_kfree_skb_any(skb);
2569 return NETDEV_TX_OK; 2569 return NETDEV_TX_OK;
2570 } 2570 }
2571 2571
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0df36c6ec7f4..104d46f37969 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev)
641 if (!mem) 641 if (!mem)
642 return -ENXIO; 642 return -ENXIO;
643 mem_size = resource_size(mem); 643 mem_size = resource_size(mem);
644 if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name)) 644
645 return -EBUSY; 645 priv->base = devm_ioremap_resource(&pdev->dev, mem);
646 priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size); 646 if (IS_ERR(priv->base))
647 if (!priv->base) 647 return PTR_ERR(priv->base);
648 return -EBUSY;
649 648
650 spin_lock_init(&priv->reg_lock); 649 spin_lock_init(&priv->reg_lock);
651 priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; 650 priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 71c27b3292f1..1f33c4c86c20 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev)
561 if (!mem) 561 if (!mem)
562 return -ENXIO; 562 return -ENXIO;
563 mem_size = resource_size(mem); 563 mem_size = resource_size(mem);
564 if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name)) 564
565 return -EBUSY; 565 priv->base = devm_ioremap_resource(&pdev->dev, mem);
566 priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size); 566 if (IS_ERR(priv->base))
567 if (!priv->base) 567 return PTR_ERR(priv->base);
568 return -EBUSY;
569 568
570 spin_lock_init(&priv->reg_lock); 569 spin_lock_init(&priv->reg_lock);
571 priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; 570 priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a4347508031c..fa193c4688da 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -771,8 +771,8 @@ static void ll_temac_recv(struct net_device *ndev)
771 771
772 /* if we're doing rx csum offload, set it up */ 772 /* if we're doing rx csum offload, set it up */
773 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && 773 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
774 (skb->protocol == __constant_htons(ETH_P_IP)) && 774 (skb->protocol == htons(ETH_P_IP)) &&
775 (skb->len > 64)) { 775 (skb->len > 64)) {
776 776
777 skb->csum = cur_p->app3 & 0xFFFF; 777 skb->csum = cur_p->app3 & 0xFFFF;
778 skb->ip_summed = CHECKSUM_COMPLETE; 778 skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4bfdf8c7ada0..7b0a73556264 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -756,7 +756,7 @@ static void axienet_recv(struct net_device *ndev)
756 skb->ip_summed = CHECKSUM_UNNECESSARY; 756 skb->ip_summed = CHECKSUM_UNNECESSARY;
757 } 757 }
758 } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && 758 } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
759 skb->protocol == __constant_htons(ETH_P_IP) && 759 skb->protocol == htons(ETH_P_IP) &&
760 skb->len > 64) { 760 skb->len > 64) {
761 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); 761 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
762 skb->ip_summed = CHECKSUM_COMPLETE; 762 skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 36052b98b3fc..0d87c67a5ff7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -795,18 +795,6 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
795} 795}
796 796
797/** 797/**
798 * xemaclite_mdio_reset - Reset the mdio bus.
799 * @bus: Pointer to the MII bus
800 *
801 * This function is required(?) as per Documentation/networking/phy.txt.
802 * There is no reset in this device; this function always returns 0.
803 */
804static int xemaclite_mdio_reset(struct mii_bus *bus)
805{
806 return 0;
807}
808
809/**
810 * xemaclite_mdio_setup - Register mii_bus for the Emaclite device 798 * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
811 * @lp: Pointer to the Emaclite device private data 799 * @lp: Pointer to the Emaclite device private data
812 * @ofdev: Pointer to OF device structure 800 * @ofdev: Pointer to OF device structure
@@ -861,7 +849,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
861 bus->name = "Xilinx Emaclite MDIO"; 849 bus->name = "Xilinx Emaclite MDIO";
862 bus->read = xemaclite_mdio_read; 850 bus->read = xemaclite_mdio_read;
863 bus->write = xemaclite_mdio_write; 851 bus->write = xemaclite_mdio_write;
864 bus->reset = xemaclite_mdio_reset;
865 bus->parent = dev; 852 bus->parent = dev;
866 bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ 853 bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
867 854
@@ -1037,7 +1024,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
1037 skb_tx_timestamp(new_skb); 1024 skb_tx_timestamp(new_skb);
1038 1025
1039 dev->stats.tx_bytes += len; 1026 dev->stats.tx_bytes += len;
1040 dev_kfree_skb(new_skb); 1027 dev_consume_skb_any(new_skb);
1041 1028
1042 return 0; 1029 return 0;
1043} 1030}
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 3f431019e615..b81bc9fca378 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -23,6 +23,7 @@ config IXP4XX_ETH
23 tristate "Intel IXP4xx Ethernet support" 23 tristate "Intel IXP4xx Ethernet support"
24 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR 24 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
25 select PHYLIB 25 select PHYLIB
26 select NET_PTP_CLASSIFY
26 ---help--- 27 ---help---
27 Say Y here if you want to use built-in Ethernet ports 28 Say Y here if you want to use built-in Ethernet ports
28 on IXP4xx processor. 29 on IXP4xx processor.
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 25283f17d82f..f7e0f0f7c2e2 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -256,10 +256,6 @@ static int ports_open;
256static struct port *npe_port_tab[MAX_NPES]; 256static struct port *npe_port_tab[MAX_NPES];
257static struct dma_pool *dma_pool; 257static struct dma_pool *dma_pool;
258 258
259static struct sock_filter ptp_filter[] = {
260 PTP_FILTER
261};
262
263static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) 259static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
264{ 260{
265 u8 *data = skb->data; 261 u8 *data = skb->data;
@@ -267,7 +263,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
267 u16 *hi, *id; 263 u16 *hi, *id;
268 u32 lo; 264 u32 lo;
269 265
270 if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4) 266 if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
271 return 0; 267 return 0;
272 268
273 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 269 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -1413,11 +1409,6 @@ static int eth_init_one(struct platform_device *pdev)
1413 char phy_id[MII_BUS_ID_SIZE + 3]; 1409 char phy_id[MII_BUS_ID_SIZE + 3];
1414 int err; 1410 int err;
1415 1411
1416 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
1417 pr_err("ixp4xx_eth: bad ptp filter\n");
1418 return -EINVAL;
1419 }
1420
1421 if (!(dev = alloc_etherdev(sizeof(struct port)))) 1412 if (!(dev = alloc_etherdev(sizeof(struct port))))
1422 return -ENOMEM; 1413 return -ENOMEM;
1423 1414
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 61dd2447e1bb..81901659cc9e 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1184,7 +1184,7 @@ static void __exit yam_cleanup_driver(void)
1184 struct yam_mcs *p; 1184 struct yam_mcs *p;
1185 int i; 1185 int i;
1186 1186
1187 del_timer(&yam_timer); 1187 del_timer_sync(&yam_timer);
1188 for (i = 0; i < NR_PORTS; i++) { 1188 for (i = 0; i < NR_PORTS; i++) {
1189 struct net_device *dev = yam_devs[i]; 1189 struct net_device *dev = yam_devs[i];
1190 if (dev) { 1190 if (dev) {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 7b594ce3f21d..13010b4dae5b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -30,6 +30,7 @@
30 30
31/* Fwd declaration */ 31/* Fwd declaration */
32struct hv_netvsc_packet; 32struct hv_netvsc_packet;
33struct ndis_tcp_ip_checksum_info;
33 34
34/* Represent the xfer page packet which contains 1 or more netvsc packet */ 35/* Represent the xfer page packet which contains 1 or more netvsc packet */
35struct xferpage_packet { 36struct xferpage_packet {
@@ -73,7 +74,7 @@ struct hv_netvsc_packet {
73 } completion; 74 } completion;
74 75
75 /* This points to the memory after page_buf */ 76 /* This points to the memory after page_buf */
76 void *extension; 77 struct rndis_message *rndis_msg;
77 78
78 u32 total_data_buflen; 79 u32 total_data_buflen;
79 /* Points to the send/receive buffer where the ethernet frame is */ 80 /* Points to the send/receive buffer where the ethernet frame is */
@@ -117,7 +118,8 @@ int netvsc_send(struct hv_device *device,
117void netvsc_linkstatus_callback(struct hv_device *device_obj, 118void netvsc_linkstatus_callback(struct hv_device *device_obj,
118 unsigned int status); 119 unsigned int status);
119int netvsc_recv_callback(struct hv_device *device_obj, 120int netvsc_recv_callback(struct hv_device *device_obj,
120 struct hv_netvsc_packet *packet); 121 struct hv_netvsc_packet *packet,
122 struct ndis_tcp_ip_checksum_info *csum_info);
121int rndis_filter_open(struct hv_device *dev); 123int rndis_filter_open(struct hv_device *dev);
122int rndis_filter_close(struct hv_device *dev); 124int rndis_filter_close(struct hv_device *dev);
123int rndis_filter_device_add(struct hv_device *dev, 125int rndis_filter_device_add(struct hv_device *dev,
@@ -126,11 +128,6 @@ void rndis_filter_device_remove(struct hv_device *dev);
126int rndis_filter_receive(struct hv_device *dev, 128int rndis_filter_receive(struct hv_device *dev,
127 struct hv_netvsc_packet *pkt); 129 struct hv_netvsc_packet *pkt);
128 130
129
130
131int rndis_filter_send(struct hv_device *dev,
132 struct hv_netvsc_packet *pkt);
133
134int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); 131int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
135int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac); 132int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
136 133
@@ -139,6 +136,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
139 136
140#define NVSP_PROTOCOL_VERSION_1 2 137#define NVSP_PROTOCOL_VERSION_1 2
141#define NVSP_PROTOCOL_VERSION_2 0x30002 138#define NVSP_PROTOCOL_VERSION_2 0x30002
139#define NVSP_PROTOCOL_VERSION_4 0x40000
140#define NVSP_PROTOCOL_VERSION_5 0x50000
142 141
143enum { 142enum {
144 NVSP_MSG_TYPE_NONE = 0, 143 NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +192,23 @@ enum {
193 192
194 NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE, 193 NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
195 NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP, 194 NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
195
196 NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
197
198 /* Version 4 messages */
199 NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
200 NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
201 NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
202
203 NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
204
205 /* Version 5 messages */
206 NVSP_MSG5_TYPE_OID_QUERY_EX,
207 NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
208 NVSP_MSG5_TYPE_SUBCHANNEL,
209 NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
210
211 NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
196}; 212};
197 213
198enum { 214enum {
@@ -447,10 +463,44 @@ union nvsp_2_message_uber {
447 struct nvsp_2_free_rxbuf free_rxbuf; 463 struct nvsp_2_free_rxbuf free_rxbuf;
448} __packed; 464} __packed;
449 465
466enum nvsp_subchannel_operation {
467 NVSP_SUBCHANNEL_NONE = 0,
468 NVSP_SUBCHANNEL_ALLOCATE,
469 NVSP_SUBCHANNEL_MAX
470};
471
472struct nvsp_5_subchannel_request {
473 u32 op;
474 u32 num_subchannels;
475} __packed;
476
477struct nvsp_5_subchannel_complete {
478 u32 status;
479 u32 num_subchannels; /* Actual number of subchannels allocated */
480} __packed;
481
482struct nvsp_5_send_indirect_table {
483 /* The number of entries in the send indirection table */
484 u32 count;
485
486 /* The offset of the send indireciton table from top of this struct.
487 * The send indirection table tells which channel to put the send
488 * traffic on. Each entry is a channel number.
489 */
490 u32 offset;
491} __packed;
492
493union nvsp_5_message_uber {
494 struct nvsp_5_subchannel_request subchn_req;
495 struct nvsp_5_subchannel_complete subchn_comp;
496 struct nvsp_5_send_indirect_table send_table;
497} __packed;
498
450union nvsp_all_messages { 499union nvsp_all_messages {
451 union nvsp_message_init_uber init_msg; 500 union nvsp_message_init_uber init_msg;
452 union nvsp_1_message_uber v1_msg; 501 union nvsp_1_message_uber v1_msg;
453 union nvsp_2_message_uber v2_msg; 502 union nvsp_2_message_uber v2_msg;
503 union nvsp_5_message_uber v5_msg;
454} __packed; 504} __packed;
455 505
456/* ALL Messages */ 506/* ALL Messages */
@@ -463,6 +513,7 @@ struct nvsp_message {
463#define NETVSC_MTU 65536 513#define NETVSC_MTU 65536
464 514
465#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ 515#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
516#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
466 517
467#define NETVSC_RECEIVE_BUFFER_ID 0xcafe 518#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
468 519
@@ -506,6 +557,8 @@ struct netvsc_device {
506 557
507 /* Holds rndis device info */ 558 /* Holds rndis device info */
508 void *extension; 559 void *extension;
560 /* The receive buffer for this device */
561 unsigned char cb_buffer[NETVSC_PACKET_SIZE];
509}; 562};
510 563
511/* NdisInitialize message */ 564/* NdisInitialize message */
@@ -671,9 +724,133 @@ struct ndis_pkt_8021q_info {
671 }; 724 };
672}; 725};
673 726
727struct ndis_oject_header {
728 u8 type;
729 u8 revision;
730 u16 size;
731};
732
733#define NDIS_OBJECT_TYPE_DEFAULT 0x80
734#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
735#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
736#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
737#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
738#define NDIS_OFFLOAD_PARAMETERS_LSOV1_ENABLED 2
739#define NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED 1
740#define NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED 2
741#define NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED 1
742#define NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED 2
743#define NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED 3
744#define NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED 4
745
746#define NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE 1
747#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
748#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
749
750/*
751 * New offload OIDs for NDIS 6
752 */
753#define OID_TCP_OFFLOAD_CURRENT_CONFIG 0xFC01020B /* query only */
754#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C /* set only */
755#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D/* query only */
756#define OID_TCP_CONNECTION_OFFLOAD_CURRENT_CONFIG 0xFC01020E /* query only */
757#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
758#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
759
760struct ndis_offload_params {
761 struct ndis_oject_header header;
762 u8 ip_v4_csum;
763 u8 tcp_ip_v4_csum;
764 u8 udp_ip_v4_csum;
765 u8 tcp_ip_v6_csum;
766 u8 udp_ip_v6_csum;
767 u8 lso_v1;
768 u8 ip_sec_v1;
769 u8 lso_v2_ipv4;
770 u8 lso_v2_ipv6;
771 u8 tcp_connection_ip_v4;
772 u8 tcp_connection_ip_v6;
773 u32 flags;
774 u8 ip_sec_v2;
775 u8 ip_sec_v2_ip_v4;
776 struct {
777 u8 rsc_ip_v4;
778 u8 rsc_ip_v6;
779 };
780 struct {
781 u8 encapsulated_packet_task_offload;
782 u8 encapsulation_types;
783 };
784};
785
786struct ndis_tcp_ip_checksum_info {
787 union {
788 struct {
789 u32 is_ipv4:1;
790 u32 is_ipv6:1;
791 u32 tcp_checksum:1;
792 u32 udp_checksum:1;
793 u32 ip_header_checksum:1;
794 u32 reserved:11;
795 u32 tcp_header_offset:10;
796 } transmit;
797 struct {
798 u32 tcp_checksum_failed:1;
799 u32 udp_checksum_failed:1;
800 u32 ip_checksum_failed:1;
801 u32 tcp_checksum_succeeded:1;
802 u32 udp_checksum_succeeded:1;
803 u32 ip_checksum_succeeded:1;
804 u32 loopback:1;
805 u32 tcp_checksum_value_invalid:1;
806 u32 ip_checksum_value_invalid:1;
807 } receive;
808 u32 value;
809 };
810};
811
812struct ndis_tcp_lso_info {
813 union {
814 struct {
815 u32 unused:30;
816 u32 type:1;
817 u32 reserved2:1;
818 } transmit;
819 struct {
820 u32 mss:20;
821 u32 tcp_header_offset:10;
822 u32 type:1;
823 u32 reserved2:1;
824 } lso_v1_transmit;
825 struct {
826 u32 tcp_payload:30;
827 u32 type:1;
828 u32 reserved2:1;
829 } lso_v1_transmit_complete;
830 struct {
831 u32 mss:20;
832 u32 tcp_header_offset:10;
833 u32 type:1;
834 u32 ip_version:1;
835 } lso_v2_transmit;
836 struct {
837 u32 reserved:30;
838 u32 type:1;
839 u32 reserved2:1;
840 } lso_v2_transmit_complete;
841 u32 value;
842 };
843};
844
674#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \ 845#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
675 sizeof(struct ndis_pkt_8021q_info)) 846 sizeof(struct ndis_pkt_8021q_info))
676 847
848#define NDIS_CSUM_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
849 sizeof(struct ndis_tcp_ip_checksum_info))
850
851#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
852 sizeof(struct ndis_tcp_lso_info))
853
677/* Format of Information buffer passed in a SetRequest for the OID */ 854/* Format of Information buffer passed in a SetRequest for the OID */
678/* OID_GEN_RNDIS_CONFIG_PARAMETER. */ 855/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
679struct rndis_config_parameter_info { 856struct rndis_config_parameter_info {
@@ -846,12 +1023,6 @@ struct rndis_message {
846}; 1023};
847 1024
848 1025
849struct rndis_filter_packet {
850 void *completion_ctx;
851 void (*completion)(void *context);
852 struct rndis_message msg;
853};
854
855/* Handy macros */ 1026/* Handy macros */
856 1027
857/* get the size of an RNDIS message. Pass in the message type, */ 1028/* get the size of an RNDIS message. Pass in the message type, */
@@ -905,6 +1076,16 @@ struct rndis_filter_packet {
905#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400 1076#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
906#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800 1077#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
907 1078
1079#define INFO_IPV4 2
1080#define INFO_IPV6 4
1081#define INFO_TCP 2
1082#define INFO_UDP 4
1083
1084#define TRANSPORT_INFO_NOT_IP 0
1085#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
1086#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
1087#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
1088#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
908 1089
909 1090
910#endif /* _HYPERV_NET_H */ 1091#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 03a2c6e17158..daddea2654ce 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
290 NVSP_STAT_SUCCESS) 290 NVSP_STAT_SUCCESS)
291 return -EINVAL; 291 return -EINVAL;
292 292
293 if (nvsp_ver != NVSP_PROTOCOL_VERSION_2) 293 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
294 return 0; 294 return 0;
295 295
296 /* NVSPv2 only: Send NDIS config */ 296 /* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
314 struct nvsp_message *init_packet; 314 struct nvsp_message *init_packet;
315 int ndis_version; 315 int ndis_version;
316 struct net_device *ndev; 316 struct net_device *ndev;
317 u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
318 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
319 int i, num_ver = 4; /* number of different NVSP versions */
317 320
318 net_device = get_outbound_net_device(device); 321 net_device = get_outbound_net_device(device);
319 if (!net_device) 322 if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
323 init_packet = &net_device->channel_init_pkt; 326 init_packet = &net_device->channel_init_pkt;
324 327
325 /* Negotiate the latest NVSP protocol supported */ 328 /* Negotiate the latest NVSP protocol supported */
326 if (negotiate_nvsp_ver(device, net_device, init_packet, 329 for (i = num_ver - 1; i >= 0; i--)
327 NVSP_PROTOCOL_VERSION_2) == 0) { 330 if (negotiate_nvsp_ver(device, net_device, init_packet,
328 net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2; 331 ver_list[i]) == 0) {
329 } else if (negotiate_nvsp_ver(device, net_device, init_packet, 332 net_device->nvsp_version = ver_list[i];
330 NVSP_PROTOCOL_VERSION_1) == 0) { 333 break;
331 net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1; 334 }
332 } else { 335
336 if (i < 0) {
333 ret = -EPROTO; 337 ret = -EPROTO;
334 goto cleanup; 338 goto cleanup;
335 } 339 }
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
339 /* Send the ndis version */ 343 /* Send the ndis version */
340 memset(init_packet, 0, sizeof(struct nvsp_message)); 344 memset(init_packet, 0, sizeof(struct nvsp_message));
341 345
342 ndis_version = 0x00050001; 346 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
347 ndis_version = 0x00050001;
348 else
349 ndis_version = 0x0006001e;
343 350
344 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER; 351 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
345 init_packet->msg.v1_msg. 352 init_packet->msg.v1_msg.
@@ -358,6 +365,11 @@ static int netvsc_connect_vsp(struct hv_device *device)
358 goto cleanup; 365 goto cleanup;
359 366
360 /* Post the big receive buffer to NetVSP */ 367 /* Post the big receive buffer to NetVSP */
368 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
369 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
370 else
371 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
372
361 ret = netvsc_init_recv_buf(device); 373 ret = netvsc_init_recv_buf(device);
362 374
363cleanup: 375cleanup:
@@ -432,17 +444,14 @@ static inline u32 hv_ringbuf_avail_percent(
432 return avail_write * 100 / ring_info->ring_datasize; 444 return avail_write * 100 / ring_info->ring_datasize;
433} 445}
434 446
435static void netvsc_send_completion(struct hv_device *device, 447static void netvsc_send_completion(struct netvsc_device *net_device,
448 struct hv_device *device,
436 struct vmpacket_descriptor *packet) 449 struct vmpacket_descriptor *packet)
437{ 450{
438 struct netvsc_device *net_device;
439 struct nvsp_message *nvsp_packet; 451 struct nvsp_message *nvsp_packet;
440 struct hv_netvsc_packet *nvsc_packet; 452 struct hv_netvsc_packet *nvsc_packet;
441 struct net_device *ndev; 453 struct net_device *ndev;
442 454
443 net_device = get_inbound_net_device(device);
444 if (!net_device)
445 return;
446 ndev = net_device->ndev; 455 ndev = net_device->ndev;
447 456
448 nvsp_packet = (struct nvsp_message *)((unsigned long)packet + 457 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +570,13 @@ int netvsc_send(struct hv_device *device,
561} 570}
562 571
563static void netvsc_send_recv_completion(struct hv_device *device, 572static void netvsc_send_recv_completion(struct hv_device *device,
573 struct netvsc_device *net_device,
564 u64 transaction_id, u32 status) 574 u64 transaction_id, u32 status)
565{ 575{
566 struct nvsp_message recvcompMessage; 576 struct nvsp_message recvcompMessage;
567 int retries = 0; 577 int retries = 0;
568 int ret; 578 int ret;
569 struct net_device *ndev; 579 struct net_device *ndev;
570 struct netvsc_device *net_device = hv_get_drvdata(device);
571 580
572 ndev = net_device->ndev; 581 ndev = net_device->ndev;
573 582
@@ -653,14 +662,15 @@ static void netvsc_receive_completion(void *context)
653 662
654 /* Send a receive completion for the xfer page packet */ 663 /* Send a receive completion for the xfer page packet */
655 if (fsend_receive_comp) 664 if (fsend_receive_comp)
656 netvsc_send_recv_completion(device, transaction_id, status); 665 netvsc_send_recv_completion(device, net_device, transaction_id,
666 status);
657 667
658} 668}
659 669
660static void netvsc_receive(struct hv_device *device, 670static void netvsc_receive(struct netvsc_device *net_device,
661 struct vmpacket_descriptor *packet) 671 struct hv_device *device,
672 struct vmpacket_descriptor *packet)
662{ 673{
663 struct netvsc_device *net_device;
664 struct vmtransfer_page_packet_header *vmxferpage_packet; 674 struct vmtransfer_page_packet_header *vmxferpage_packet;
665 struct nvsp_message *nvsp_packet; 675 struct nvsp_message *nvsp_packet;
666 struct hv_netvsc_packet *netvsc_packet = NULL; 676 struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +683,6 @@ static void netvsc_receive(struct hv_device *device,
673 683
674 LIST_HEAD(listHead); 684 LIST_HEAD(listHead);
675 685
676 net_device = get_inbound_net_device(device);
677 if (!net_device)
678 return;
679 ndev = net_device->ndev; 686 ndev = net_device->ndev;
680 687
681 /* 688 /*
@@ -741,7 +748,7 @@ static void netvsc_receive(struct hv_device *device,
741 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, 748 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
742 flags); 749 flags);
743 750
744 netvsc_send_recv_completion(device, 751 netvsc_send_recv_completion(device, net_device,
745 vmxferpage_packet->d.trans_id, 752 vmxferpage_packet->d.trans_id,
746 NVSP_STAT_FAIL); 753 NVSP_STAT_FAIL);
747 754
@@ -800,22 +807,16 @@ static void netvsc_channel_cb(void *context)
800 struct netvsc_device *net_device; 807 struct netvsc_device *net_device;
801 u32 bytes_recvd; 808 u32 bytes_recvd;
802 u64 request_id; 809 u64 request_id;
803 unsigned char *packet;
804 struct vmpacket_descriptor *desc; 810 struct vmpacket_descriptor *desc;
805 unsigned char *buffer; 811 unsigned char *buffer;
806 int bufferlen = NETVSC_PACKET_SIZE; 812 int bufferlen = NETVSC_PACKET_SIZE;
807 struct net_device *ndev; 813 struct net_device *ndev;
808 814
809 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
810 GFP_ATOMIC);
811 if (!packet)
812 return;
813 buffer = packet;
814
815 net_device = get_inbound_net_device(device); 815 net_device = get_inbound_net_device(device);
816 if (!net_device) 816 if (!net_device)
817 goto out; 817 return;
818 ndev = net_device->ndev; 818 ndev = net_device->ndev;
819 buffer = net_device->cb_buffer;
819 820
820 do { 821 do {
821 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen, 822 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +826,13 @@ static void netvsc_channel_cb(void *context)
825 desc = (struct vmpacket_descriptor *)buffer; 826 desc = (struct vmpacket_descriptor *)buffer;
826 switch (desc->type) { 827 switch (desc->type) {
827 case VM_PKT_COMP: 828 case VM_PKT_COMP:
828 netvsc_send_completion(device, desc); 829 netvsc_send_completion(net_device,
830 device, desc);
829 break; 831 break;
830 832
831 case VM_PKT_DATA_USING_XFER_PAGES: 833 case VM_PKT_DATA_USING_XFER_PAGES:
832 netvsc_receive(device, desc); 834 netvsc_receive(net_device,
835 device, desc);
833 break; 836 break;
834 837
835 default: 838 default:
@@ -841,23 +844,16 @@ static void netvsc_channel_cb(void *context)
841 break; 844 break;
842 } 845 }
843 846
844 /* reset */
845 if (bufferlen > NETVSC_PACKET_SIZE) {
846 kfree(buffer);
847 buffer = packet;
848 bufferlen = NETVSC_PACKET_SIZE;
849 }
850 } else { 847 } else {
851 /* reset */ 848 /*
852 if (bufferlen > NETVSC_PACKET_SIZE) { 849 * We are done for this pass.
853 kfree(buffer); 850 */
854 buffer = packet;
855 bufferlen = NETVSC_PACKET_SIZE;
856 }
857
858 break; 851 break;
859 } 852 }
853
860 } else if (ret == -ENOBUFS) { 854 } else if (ret == -ENOBUFS) {
855 if (bufferlen > NETVSC_PACKET_SIZE)
856 kfree(buffer);
861 /* Handle large packet */ 857 /* Handle large packet */
862 buffer = kmalloc(bytes_recvd, GFP_ATOMIC); 858 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
863 if (buffer == NULL) { 859 if (buffer == NULL) {
@@ -872,8 +868,8 @@ static void netvsc_channel_cb(void *context)
872 } 868 }
873 } while (1); 869 } while (1);
874 870
875out: 871 if (bufferlen > NETVSC_PACKET_SIZE)
876 kfree(buffer); 872 kfree(buffer);
877 return; 873 return;
878} 874}
879 875
@@ -907,7 +903,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
907 ndev = net_device->ndev; 903 ndev = net_device->ndev;
908 904
909 /* Initialize the NetVSC channel extension */ 905 /* Initialize the NetVSC channel extension */
910 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
911 spin_lock_init(&net_device->recv_pkt_list_lock); 906 spin_lock_init(&net_device->recv_pkt_list_lock);
912 907
913 INIT_LIST_HEAD(&net_device->recv_pkt_list); 908 INIT_LIST_HEAD(&net_device->recv_pkt_list);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6fce9750b95..4e4cf9e0c8d7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -128,6 +128,27 @@ static int netvsc_close(struct net_device *net)
128 return ret; 128 return ret;
129} 129}
130 130
131static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
132 int pkt_type)
133{
134 struct rndis_packet *rndis_pkt;
135 struct rndis_per_packet_info *ppi;
136
137 rndis_pkt = &msg->msg.pkt;
138 rndis_pkt->data_offset += ppi_size;
139
140 ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
141 rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
142
143 ppi->size = ppi_size;
144 ppi->type = pkt_type;
145 ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
146
147 rndis_pkt->per_pkt_info_len += ppi_size;
148
149 return ppi;
150}
151
131static void netvsc_xmit_completion(void *context) 152static void netvsc_xmit_completion(void *context)
132{ 153{
133 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context; 154 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
@@ -140,22 +161,164 @@ static void netvsc_xmit_completion(void *context)
140 dev_kfree_skb_any(skb); 161 dev_kfree_skb_any(skb);
141} 162}
142 163
164static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
165 struct hv_page_buffer *pb)
166{
167 int j = 0;
168
169 /* Deal with compound pages by ignoring unused part
170 * of the page.
171 */
172 page += (offset >> PAGE_SHIFT);
173 offset &= ~PAGE_MASK;
174
175 while (len > 0) {
176 unsigned long bytes;
177
178 bytes = PAGE_SIZE - offset;
179 if (bytes > len)
180 bytes = len;
181 pb[j].pfn = page_to_pfn(page);
182 pb[j].offset = offset;
183 pb[j].len = bytes;
184
185 offset += bytes;
186 len -= bytes;
187
188 if (offset == PAGE_SIZE && len) {
189 page++;
190 offset = 0;
191 j++;
192 }
193 }
194
195 return j + 1;
196}
197
198static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
199 struct hv_page_buffer *pb)
200{
201 u32 slots_used = 0;
202 char *data = skb->data;
203 int frags = skb_shinfo(skb)->nr_frags;
204 int i;
205
206 /* The packet is laid out thus:
207 * 1. hdr
208 * 2. skb linear data
209 * 3. skb fragment data
210 */
211 if (hdr != NULL)
212 slots_used += fill_pg_buf(virt_to_page(hdr),
213 offset_in_page(hdr),
214 len, &pb[slots_used]);
215
216 slots_used += fill_pg_buf(virt_to_page(data),
217 offset_in_page(data),
218 skb_headlen(skb), &pb[slots_used]);
219
220 for (i = 0; i < frags; i++) {
221 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
222
223 slots_used += fill_pg_buf(skb_frag_page(frag),
224 frag->page_offset,
225 skb_frag_size(frag), &pb[slots_used]);
226 }
227 return slots_used;
228}
229
230static int count_skb_frag_slots(struct sk_buff *skb)
231{
232 int i, frags = skb_shinfo(skb)->nr_frags;
233 int pages = 0;
234
235 for (i = 0; i < frags; i++) {
236 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
237 unsigned long size = skb_frag_size(frag);
238 unsigned long offset = frag->page_offset;
239
240 /* Skip unused frames from start of page */
241 offset &= ~PAGE_MASK;
242 pages += PFN_UP(offset + size);
243 }
244 return pages;
245}
246
247static int netvsc_get_slots(struct sk_buff *skb)
248{
249 char *data = skb->data;
250 unsigned int offset = offset_in_page(data);
251 unsigned int len = skb_headlen(skb);
252 int slots;
253 int frag_slots;
254
255 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
256 frag_slots = count_skb_frag_slots(skb);
257 return slots + frag_slots;
258}
259
260static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
261{
262 u32 ret_val = TRANSPORT_INFO_NOT_IP;
263
264 if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
265 (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
266 goto not_ip;
267 }
268
269 *trans_off = skb_transport_offset(skb);
270
271 if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
272 struct iphdr *iphdr = ip_hdr(skb);
273
274 if (iphdr->protocol == IPPROTO_TCP)
275 ret_val = TRANSPORT_INFO_IPV4_TCP;
276 else if (iphdr->protocol == IPPROTO_UDP)
277 ret_val = TRANSPORT_INFO_IPV4_UDP;
278 } else {
279 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
280 ret_val = TRANSPORT_INFO_IPV6_TCP;
281 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
282 ret_val = TRANSPORT_INFO_IPV6_UDP;
283 }
284
285not_ip:
286 return ret_val;
287}
288
143static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) 289static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
144{ 290{
145 struct net_device_context *net_device_ctx = netdev_priv(net); 291 struct net_device_context *net_device_ctx = netdev_priv(net);
146 struct hv_netvsc_packet *packet; 292 struct hv_netvsc_packet *packet;
147 int ret; 293 int ret;
148 unsigned int i, num_pages, npg_data; 294 unsigned int num_data_pgs;
149 295 struct rndis_message *rndis_msg;
150 /* Add multipages for skb->data and additional 2 for RNDIS */ 296 struct rndis_packet *rndis_pkt;
151 npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1) 297 u32 rndis_msg_size;
152 >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1; 298 bool isvlan;
153 num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2; 299 struct rndis_per_packet_info *ppi;
300 struct ndis_tcp_ip_checksum_info *csum_info;
301 struct ndis_tcp_lso_info *lso_info;
302 int hdr_offset;
303 u32 net_trans_info;
304
305
306 /* We will at most need two pages to describe the rndis
307 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
308 * of pages in a single packet.
309 */
310 num_data_pgs = netvsc_get_slots(skb) + 2;
311 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
312 netdev_err(net, "Packet too big: %u\n", skb->len);
313 dev_kfree_skb(skb);
314 net->stats.tx_dropped++;
315 return NETDEV_TX_OK;
316 }
154 317
155 /* Allocate a netvsc packet based on # of frags. */ 318 /* Allocate a netvsc packet based on # of frags. */
156 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 319 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
157 (num_pages * sizeof(struct hv_page_buffer)) + 320 (num_data_pgs * sizeof(struct hv_page_buffer)) +
158 sizeof(struct rndis_filter_packet) + 321 sizeof(struct rndis_message) +
159 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC); 322 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
160 if (!packet) { 323 if (!packet) {
161 /* out of memory, drop packet */ 324 /* out of memory, drop packet */
@@ -168,53 +331,111 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
168 331
169 packet->vlan_tci = skb->vlan_tci; 332 packet->vlan_tci = skb->vlan_tci;
170 333
171 packet->extension = (void *)(unsigned long)packet + 334 packet->is_data_pkt = true;
335 packet->total_data_buflen = skb->len;
336
337 packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
172 sizeof(struct hv_netvsc_packet) + 338 sizeof(struct hv_netvsc_packet) +
173 (num_pages * sizeof(struct hv_page_buffer)); 339 (num_data_pgs * sizeof(struct hv_page_buffer)));
340
341 /* Set the completion routine */
342 packet->completion.send.send_completion = netvsc_xmit_completion;
343 packet->completion.send.send_completion_ctx = packet;
344 packet->completion.send.send_completion_tid = (unsigned long)skb;
174 345
175 /* If the rndis msg goes beyond 1 page, we will add 1 later */ 346 isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
176 packet->page_buf_cnt = num_pages - 1; 347
348 /* Add the rndis header */
349 rndis_msg = packet->rndis_msg;
350 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
351 rndis_msg->msg_len = packet->total_data_buflen;
352 rndis_pkt = &rndis_msg->msg.pkt;
353 rndis_pkt->data_offset = sizeof(struct rndis_packet);
354 rndis_pkt->data_len = packet->total_data_buflen;
355 rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
356
357 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
358
359 if (isvlan) {
360 struct ndis_pkt_8021q_info *vlan;
361
362 rndis_msg_size += NDIS_VLAN_PPI_SIZE;
363 ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
364 IEEE_8021Q_INFO);
365 vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
366 ppi->ppi_offset);
367 vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
368 vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
369 VLAN_PRIO_SHIFT;
370 }
177 371
178 /* Initialize it from the skb */ 372 net_trans_info = get_net_transport_info(skb, &hdr_offset);
179 packet->total_data_buflen = skb->len; 373 if (net_trans_info == TRANSPORT_INFO_NOT_IP)
374 goto do_send;
375
376 /*
377 * Setup the sendside checksum offload only if this is not a
378 * GSO packet.
379 */
380 if (skb_is_gso(skb))
381 goto do_lso;
382
383 rndis_msg_size += NDIS_CSUM_PPI_SIZE;
384 ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
385 TCPIP_CHKSUM_PKTINFO);
386
387 csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
388 ppi->ppi_offset);
180 389
181 /* Start filling in the page buffers starting after RNDIS buffer. */ 390 if (net_trans_info & (INFO_IPV4 << 16))
182 packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT; 391 csum_info->transmit.is_ipv4 = 1;
183 packet->page_buf[1].offset
184 = (unsigned long)skb->data & (PAGE_SIZE - 1);
185 if (npg_data == 1)
186 packet->page_buf[1].len = skb_headlen(skb);
187 else 392 else
188 packet->page_buf[1].len = PAGE_SIZE 393 csum_info->transmit.is_ipv6 = 1;
189 - packet->page_buf[1].offset; 394
190 395 if (net_trans_info & INFO_TCP) {
191 for (i = 2; i <= npg_data; i++) { 396 csum_info->transmit.tcp_checksum = 1;
192 packet->page_buf[i].pfn = virt_to_phys(skb->data 397 csum_info->transmit.tcp_header_offset = hdr_offset;
193 + PAGE_SIZE * (i-1)) >> PAGE_SHIFT; 398 } else if (net_trans_info & INFO_UDP) {
194 packet->page_buf[i].offset = 0; 399 csum_info->transmit.udp_checksum = 1;
195 packet->page_buf[i].len = PAGE_SIZE;
196 } 400 }
197 if (npg_data > 1) 401 goto do_send;
198 packet->page_buf[npg_data].len = (((unsigned long)skb->data 402
199 + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1; 403do_lso:
200 404 rndis_msg_size += NDIS_LSO_PPI_SIZE;
201 /* Additional fragments are after SKB data */ 405 ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
202 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 406 TCP_LARGESEND_PKTINFO);
203 const skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 407
204 408 lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
205 packet->page_buf[i+npg_data+1].pfn = 409 ppi->ppi_offset);
206 page_to_pfn(skb_frag_page(f)); 410
207 packet->page_buf[i+npg_data+1].offset = f->page_offset; 411 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
208 packet->page_buf[i+npg_data+1].len = skb_frag_size(f); 412 if (net_trans_info & (INFO_IPV4 << 16)) {
413 lso_info->lso_v2_transmit.ip_version =
414 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
415 ip_hdr(skb)->tot_len = 0;
416 ip_hdr(skb)->check = 0;
417 tcp_hdr(skb)->check =
418 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
419 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
420 } else {
421 lso_info->lso_v2_transmit.ip_version =
422 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
423 ipv6_hdr(skb)->payload_len = 0;
424 tcp_hdr(skb)->check =
425 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
426 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
209 } 427 }
428 lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
429 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
210 430
211 /* Set the completion routine */ 431do_send:
212 packet->completion.send.send_completion = netvsc_xmit_completion; 432 /* Start filling in the page buffers with the rndis hdr */
213 packet->completion.send.send_completion_ctx = packet; 433 rndis_msg->msg_len += rndis_msg_size;
214 packet->completion.send.send_completion_tid = (unsigned long)skb; 434 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
435 skb, &packet->page_buf[0]);
436
437 ret = netvsc_send(net_device_ctx->device_ctx, packet);
215 438
216 ret = rndis_filter_send(net_device_ctx->device_ctx,
217 packet);
218 if (ret == 0) { 439 if (ret == 0) {
219 net->stats.tx_bytes += skb->len; 440 net->stats.tx_bytes += skb->len;
220 net->stats.tx_packets++; 441 net->stats.tx_packets++;
@@ -264,7 +485,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
264 * "wire" on the specified device. 485 * "wire" on the specified device.
265 */ 486 */
266int netvsc_recv_callback(struct hv_device *device_obj, 487int netvsc_recv_callback(struct hv_device *device_obj,
267 struct hv_netvsc_packet *packet) 488 struct hv_netvsc_packet *packet,
489 struct ndis_tcp_ip_checksum_info *csum_info)
268{ 490{
269 struct net_device *net; 491 struct net_device *net;
270 struct sk_buff *skb; 492 struct sk_buff *skb;
@@ -291,7 +513,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
291 packet->total_data_buflen); 513 packet->total_data_buflen);
292 514
293 skb->protocol = eth_type_trans(skb, net); 515 skb->protocol = eth_type_trans(skb, net);
294 skb->ip_summed = CHECKSUM_NONE; 516 if (csum_info) {
517 /* We only look at the IP checksum here.
518 * Should we be dropping the packet if checksum
519 * failed? How do we deal with other checksums - TCP/UDP?
520 */
521 if (csum_info->receive.ip_checksum_succeeded)
522 skb->ip_summed = CHECKSUM_UNNECESSARY;
523 else
524 skb->ip_summed = CHECKSUM_NONE;
525 }
526
295 if (packet->vlan_tci & VLAN_TAG_PRESENT) 527 if (packet->vlan_tci & VLAN_TAG_PRESENT)
296 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 528 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
297 packet->vlan_tci); 529 packet->vlan_tci);
@@ -327,7 +559,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
327 if (nvdev == NULL || nvdev->destroy) 559 if (nvdev == NULL || nvdev->destroy)
328 return -ENODEV; 560 return -ENODEV;
329 561
330 if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2) 562 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
331 limit = NETVSC_MTU; 563 limit = NETVSC_MTU;
332 564
333 if (mtu < 68 || mtu > limit) 565 if (mtu < 68 || mtu > limit)
@@ -452,9 +684,10 @@ static int netvsc_probe(struct hv_device *dev,
452 684
453 net->netdev_ops = &device_ops; 685 net->netdev_ops = &device_ops;
454 686
455 /* TODO: Add GSO and Checksum offload */ 687 net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
456 net->hw_features = 0; 688 NETIF_F_TSO;
457 net->features = NETIF_F_HW_VLAN_CTAG_TX; 689 net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
690 NETIF_F_IP_CSUM | NETIF_F_TSO;
458 691
459 SET_ETHTOOL_OPS(net, &ethtool_ops); 692 SET_ETHTOOL_OPS(net, &ethtool_ops);
460 SET_NETDEV_DEV(net, &dev->device); 693 SET_NETDEV_DEV(net, &dev->device);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b54fd257652b..4a37e3db9e32 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -58,9 +58,6 @@ struct rndis_request {
58 u8 request_ext[RNDIS_EXT_LEN]; 58 u8 request_ext[RNDIS_EXT_LEN];
59}; 59};
60 60
61static void rndis_filter_send_completion(void *ctx);
62
63
64static struct rndis_device *get_rndis_device(void) 61static struct rndis_device *get_rndis_device(void)
65{ 62{
66 struct rndis_device *device; 63 struct rndis_device *device;
@@ -297,7 +294,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
297 "rndis response buffer overflow " 294 "rndis response buffer overflow "
298 "detected (size %u max %zu)\n", 295 "detected (size %u max %zu)\n",
299 resp->msg_len, 296 resp->msg_len,
300 sizeof(struct rndis_filter_packet)); 297 sizeof(struct rndis_message));
301 298
302 if (resp->ndis_msg_type == 299 if (resp->ndis_msg_type ==
303 RNDIS_MSG_RESET_C) { 300 RNDIS_MSG_RESET_C) {
@@ -373,6 +370,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
373 struct rndis_packet *rndis_pkt; 370 struct rndis_packet *rndis_pkt;
374 u32 data_offset; 371 u32 data_offset;
375 struct ndis_pkt_8021q_info *vlan; 372 struct ndis_pkt_8021q_info *vlan;
373 struct ndis_tcp_ip_checksum_info *csum_info;
376 374
377 rndis_pkt = &msg->msg.pkt; 375 rndis_pkt = &msg->msg.pkt;
378 376
@@ -411,7 +409,8 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
411 pkt->vlan_tci = 0; 409 pkt->vlan_tci = 0;
412 } 410 }
413 411
414 netvsc_recv_callback(dev->net_dev->dev, pkt); 412 csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
413 netvsc_recv_callback(dev->net_dev->dev, pkt, csum_info);
415} 414}
416 415
417int rndis_filter_receive(struct hv_device *dev, 416int rndis_filter_receive(struct hv_device *dev,
@@ -630,6 +629,61 @@ cleanup:
630 return ret; 629 return ret;
631} 630}
632 631
632int rndis_filter_set_offload_params(struct hv_device *hdev,
633 struct ndis_offload_params *req_offloads)
634{
635 struct netvsc_device *nvdev = hv_get_drvdata(hdev);
636 struct rndis_device *rdev = nvdev->extension;
637 struct net_device *ndev = nvdev->ndev;
638 struct rndis_request *request;
639 struct rndis_set_request *set;
640 struct ndis_offload_params *offload_params;
641 struct rndis_set_complete *set_complete;
642 u32 extlen = sizeof(struct ndis_offload_params);
643 int ret, t;
644
645 request = get_rndis_request(rdev, RNDIS_MSG_SET,
646 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
647 if (!request)
648 return -ENOMEM;
649
650 set = &request->request_msg.msg.set_req;
651 set->oid = OID_TCP_OFFLOAD_PARAMETERS;
652 set->info_buflen = extlen;
653 set->info_buf_offset = sizeof(struct rndis_set_request);
654 set->dev_vc_handle = 0;
655
656 offload_params = (struct ndis_offload_params *)((ulong)set +
657 set->info_buf_offset);
658 *offload_params = *req_offloads;
659 offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
660 offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
661 offload_params->header.size = extlen;
662
663 ret = rndis_filter_send_request(rdev, request);
664 if (ret != 0)
665 goto cleanup;
666
667 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
668 if (t == 0) {
669 netdev_err(ndev, "timeout before we got aOFFLOAD set response...\n");
670 /* can't put_rndis_request, since we may still receive a
671 * send-completion.
672 */
673 return -EBUSY;
674 } else {
675 set_complete = &request->response_msg.msg.set_complete;
676 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
677 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
678 set_complete->status);
679 ret = -EINVAL;
680 }
681 }
682
683cleanup:
684 put_rndis_request(rdev, request);
685 return ret;
686}
633 687
634static int rndis_filter_query_device_link_status(struct rndis_device *dev) 688static int rndis_filter_query_device_link_status(struct rndis_device *dev)
635{ 689{
@@ -829,6 +883,7 @@ int rndis_filter_device_add(struct hv_device *dev,
829 struct netvsc_device *net_device; 883 struct netvsc_device *net_device;
830 struct rndis_device *rndis_device; 884 struct rndis_device *rndis_device;
831 struct netvsc_device_info *device_info = additional_info; 885 struct netvsc_device_info *device_info = additional_info;
886 struct ndis_offload_params offloads;
832 887
833 rndis_device = get_rndis_device(); 888 rndis_device = get_rndis_device();
834 if (!rndis_device) 889 if (!rndis_device)
@@ -868,6 +923,26 @@ int rndis_filter_device_add(struct hv_device *dev,
868 923
869 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); 924 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
870 925
926 /* Turn on the offloads; the host supports all of the relevant
927 * offloads.
928 */
929 memset(&offloads, 0, sizeof(struct ndis_offload_params));
930 /* A value of zero means "no change"; now turn on what we
931 * want.
932 */
933 offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
934 offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
935 offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
936 offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
937 offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
938 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
939
940
941 ret = rndis_filter_set_offload_params(dev, &offloads);
942 if (ret)
943 goto err_dev_remv;
944
945
871 rndis_filter_query_device_link_status(rndis_device); 946 rndis_filter_query_device_link_status(rndis_device);
872 947
873 device_info->link_state = rndis_device->link_state; 948 device_info->link_state = rndis_device->link_state;
@@ -877,6 +952,10 @@ int rndis_filter_device_add(struct hv_device *dev,
877 device_info->link_state ? "down" : "up"); 952 device_info->link_state ? "down" : "up");
878 953
879 return ret; 954 return ret;
955
956err_dev_remv:
957 rndis_filter_device_remove(dev);
958 return ret;
880} 959}
881 960
882void rndis_filter_device_remove(struct hv_device *dev) 961void rndis_filter_device_remove(struct hv_device *dev)
@@ -913,101 +992,3 @@ int rndis_filter_close(struct hv_device *dev)
913 992
914 return rndis_filter_close_device(nvdev->extension); 993 return rndis_filter_close_device(nvdev->extension);
915} 994}
916
917int rndis_filter_send(struct hv_device *dev,
918 struct hv_netvsc_packet *pkt)
919{
920 int ret;
921 struct rndis_filter_packet *filter_pkt;
922 struct rndis_message *rndis_msg;
923 struct rndis_packet *rndis_pkt;
924 u32 rndis_msg_size;
925 bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
926
927 /* Add the rndis header */
928 filter_pkt = (struct rndis_filter_packet *)pkt->extension;
929
930 rndis_msg = &filter_pkt->msg;
931 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
932 if (isvlan)
933 rndis_msg_size += NDIS_VLAN_PPI_SIZE;
934
935 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
936 rndis_msg->msg_len = pkt->total_data_buflen +
937 rndis_msg_size;
938
939 rndis_pkt = &rndis_msg->msg.pkt;
940 rndis_pkt->data_offset = sizeof(struct rndis_packet);
941 if (isvlan)
942 rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
943 rndis_pkt->data_len = pkt->total_data_buflen;
944
945 if (isvlan) {
946 struct rndis_per_packet_info *ppi;
947 struct ndis_pkt_8021q_info *vlan;
948
949 rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
950 rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
951
952 ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
953 rndis_pkt->per_pkt_info_offset);
954 ppi->size = NDIS_VLAN_PPI_SIZE;
955 ppi->type = IEEE_8021Q_INFO;
956 ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
957
958 vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
959 ppi->ppi_offset);
960 vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
961 vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
962 }
963
964 pkt->is_data_pkt = true;
965 pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
966 pkt->page_buf[0].offset =
967 (unsigned long)rndis_msg & (PAGE_SIZE-1);
968 pkt->page_buf[0].len = rndis_msg_size;
969
970 /* Add one page_buf if the rndis msg goes beyond page boundary */
971 if (pkt->page_buf[0].offset + rndis_msg_size > PAGE_SIZE) {
972 int i;
973 for (i = pkt->page_buf_cnt; i > 1; i--)
974 pkt->page_buf[i] = pkt->page_buf[i-1];
975 pkt->page_buf_cnt++;
976 pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
977 pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
978 rndis_msg + pkt->page_buf[0].len)) >> PAGE_SHIFT;
979 pkt->page_buf[1].offset = 0;
980 pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
981 }
982
983 /* Save the packet send completion and context */
984 filter_pkt->completion = pkt->completion.send.send_completion;
985 filter_pkt->completion_ctx =
986 pkt->completion.send.send_completion_ctx;
987
988 /* Use ours */
989 pkt->completion.send.send_completion = rndis_filter_send_completion;
990 pkt->completion.send.send_completion_ctx = filter_pkt;
991
992 ret = netvsc_send(dev, pkt);
993 if (ret != 0) {
994 /*
995 * Reset the completion to originals to allow retries from
996 * above
997 */
998 pkt->completion.send.send_completion =
999 filter_pkt->completion;
1000 pkt->completion.send.send_completion_ctx =
1001 filter_pkt->completion_ctx;
1002 }
1003
1004 return ret;
1005}
1006
1007static void rndis_filter_send_completion(void *ctx)
1008{
1009 struct rndis_filter_packet *filter_pkt = ctx;
1010
1011 /* Pass it back to the original handler */
1012 filter_pkt->completion(filter_pkt->completion_ctx);
1013}
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 08ae4655423a..3e89beab64fd 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -15,9 +15,9 @@ config IEEE802154_FAKEHARD
15 depends on IEEE802154_DRIVERS 15 depends on IEEE802154_DRIVERS
16 ---help--- 16 ---help---
17 Say Y here to enable the fake driver that serves as an example 17 Say Y here to enable the fake driver that serves as an example
18 of HardMAC device driver. 18 of HardMAC device driver.
19 19
20 This driver can also be built as a module. To do so say M here. 20 This driver can also be built as a module. To do so say M here.
21 The module will be called 'fakehard'. 21 The module will be called 'fakehard'.
22 22
23config IEEE802154_FAKELB 23config IEEE802154_FAKELB
@@ -31,17 +31,23 @@ config IEEE802154_FAKELB
31 The module will be called 'fakelb'. 31 The module will be called 'fakelb'.
32 32
33config IEEE802154_AT86RF230 33config IEEE802154_AT86RF230
34 depends on IEEE802154_DRIVERS && MAC802154 34 depends on IEEE802154_DRIVERS && MAC802154
35 tristate "AT86RF230/231 transceiver driver" 35 tristate "AT86RF230/231/233/212 transceiver driver"
36 depends on SPI 36 depends on SPI
37 ---help---
38 Say Y here to enable the at86rf230/231/233/212 SPI 802.15.4 wireless
39 controller.
40
41 This driver can also be built as a module. To do so, say M here.
42 the module will be called 'at86rf230'.
37 43
38config IEEE802154_MRF24J40 44config IEEE802154_MRF24J40
39 tristate "Microchip MRF24J40 transceiver driver" 45 tristate "Microchip MRF24J40 transceiver driver"
40 depends on IEEE802154_DRIVERS && MAC802154 46 depends on IEEE802154_DRIVERS && MAC802154
41 depends on SPI 47 depends on SPI
42 ---help--- 48 ---help---
43 Say Y here to enable the MRF24J20 SPI 802.15.4 wireless 49 Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
44 controller. 50 controller.
45 51
46 This driver can also be built as a module. To do so, say M here. 52 This driver can also be built as a module. To do so, say M here.
47 the module will be called 'mrf24j40'. 53 the module will be called 'mrf24j40'.
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index a30258aad139..89417ac41083 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -31,13 +31,13 @@
31#include <linux/spi/spi.h> 31#include <linux/spi/spi.h>
32#include <linux/spi/at86rf230.h> 32#include <linux/spi/at86rf230.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/of_gpio.h>
34 35
35#include <net/mac802154.h> 36#include <net/mac802154.h>
36#include <net/wpan-phy.h> 37#include <net/wpan-phy.h>
37 38
38struct at86rf230_local { 39struct at86rf230_local {
39 struct spi_device *spi; 40 struct spi_device *spi;
40 int rstn, slp_tr, dig2;
41 41
42 u8 part; 42 u8 part;
43 u8 vers; 43 u8 vers;
@@ -53,8 +53,16 @@ struct at86rf230_local {
53 spinlock_t lock; 53 spinlock_t lock;
54 bool irq_busy; 54 bool irq_busy;
55 bool is_tx; 55 bool is_tx;
56 bool tx_aret;
57
58 int rssi_base_val;
56}; 59};
57 60
61static bool is_rf212(struct at86rf230_local *local)
62{
63 return local->part == 7;
64}
65
58#define RG_TRX_STATUS (0x01) 66#define RG_TRX_STATUS (0x01)
59#define SR_TRX_STATUS 0x01, 0x1f, 0 67#define SR_TRX_STATUS 0x01, 0x1f, 0
60#define SR_RESERVED_01_3 0x01, 0x20, 5 68#define SR_RESERVED_01_3 0x01, 0x20, 5
@@ -100,7 +108,10 @@ struct at86rf230_local {
100#define SR_SFD_VALUE 0x0b, 0xff, 0 108#define SR_SFD_VALUE 0x0b, 0xff, 0
101#define RG_TRX_CTRL_2 (0x0c) 109#define RG_TRX_CTRL_2 (0x0c)
102#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 110#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
103#define SR_RESERVED_0c_2 0x0c, 0x7c, 2 111#define SR_SUB_MODE 0x0c, 0x04, 2
112#define SR_BPSK_QPSK 0x0c, 0x08, 3
113#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
114#define SR_RESERVED_0c_5 0x0c, 0x60, 5
104#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 115#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
105#define RG_ANT_DIV (0x0d) 116#define RG_ANT_DIV (0x0d)
106#define SR_ANT_CTRL 0x0d, 0x03, 0 117#define SR_ANT_CTRL 0x0d, 0x03, 0
@@ -145,7 +156,7 @@ struct at86rf230_local {
145#define SR_RESERVED_17_5 0x17, 0x08, 3 156#define SR_RESERVED_17_5 0x17, 0x08, 3
146#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 157#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
147#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 158#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
148#define SR_RESERVED_17_2 0x17, 0x40, 6 159#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
149#define SR_RESERVED_17_1 0x17, 0x80, 7 160#define SR_RESERVED_17_1 0x17, 0x80, 7
150#define RG_FTN_CTRL (0x18) 161#define RG_FTN_CTRL (0x18)
151#define SR_RESERVED_18_2 0x18, 0x7f, 0 162#define SR_RESERVED_18_2 0x18, 0x7f, 0
@@ -234,6 +245,7 @@ struct at86rf230_local {
234#define STATE_TX_ON 0x09 245#define STATE_TX_ON 0x09
235/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ 246/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
236#define STATE_SLEEP 0x0F 247#define STATE_SLEEP 0x0F
248#define STATE_PREP_DEEP_SLEEP 0x10
237#define STATE_BUSY_RX_AACK 0x11 249#define STATE_BUSY_RX_AACK 0x11
238#define STATE_BUSY_TX_ARET 0x12 250#define STATE_BUSY_TX_ARET 0x12
239#define STATE_RX_AACK_ON 0x16 251#define STATE_RX_AACK_ON 0x16
@@ -244,6 +256,57 @@ struct at86rf230_local {
244#define STATE_TRANSITION_IN_PROGRESS 0x1F 256#define STATE_TRANSITION_IN_PROGRESS 0x1F
245 257
246static int 258static int
259__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
260 u8 *version)
261{
262 u8 data[4];
263 u8 *buf = kmalloc(2, GFP_KERNEL);
264 int status;
265 struct spi_message msg;
266 struct spi_transfer xfer = {
267 .len = 2,
268 .tx_buf = buf,
269 .rx_buf = buf,
270 };
271 u8 reg;
272
273 if (!buf)
274 return -ENOMEM;
275
276 for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
277 buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
278 buf[1] = 0xff;
279 dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
280 spi_message_init(&msg);
281 spi_message_add_tail(&xfer, &msg);
282
283 status = spi_sync(spi, &msg);
284 dev_vdbg(&spi->dev, "status = %d\n", status);
285 if (msg.status)
286 status = msg.status;
287
288 dev_vdbg(&spi->dev, "status = %d\n", status);
289 dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
290 dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
291
292 if (status == 0)
293 data[reg - RG_PART_NUM] = buf[1];
294 else
295 break;
296 }
297
298 if (status == 0) {
299 *part = data[0];
300 *version = data[1];
301 *man_id = (data[3] << 8) | data[2];
302 }
303
304 kfree(buf);
305
306 return status;
307}
308
309static int
247__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data) 310__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
248{ 311{
249 u8 *buf = lp->buf; 312 u8 *buf = lp->buf;
@@ -489,7 +552,9 @@ at86rf230_state(struct ieee802154_dev *dev, int state)
489 } while (val == STATE_TRANSITION_IN_PROGRESS); 552 } while (val == STATE_TRANSITION_IN_PROGRESS);
490 553
491 554
492 if (val == desired_status) 555 if (val == desired_status ||
556 (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
557 (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
493 return 0; 558 return 0;
494 559
495 pr_err("unexpected state change: %d, asked for %d\n", val, state); 560 pr_err("unexpected state change: %d, asked for %d\n", val, state);
@@ -510,7 +575,11 @@ at86rf230_start(struct ieee802154_dev *dev)
510 if (rc) 575 if (rc)
511 return rc; 576 return rc;
512 577
513 return at86rf230_state(dev, STATE_RX_ON); 578 rc = at86rf230_state(dev, STATE_TX_ON);
579 if (rc)
580 return rc;
581
582 return at86rf230_state(dev, STATE_RX_AACK_ON);
514} 583}
515 584
516static void 585static void
@@ -520,6 +589,39 @@ at86rf230_stop(struct ieee802154_dev *dev)
520} 589}
521 590
522static int 591static int
592at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
593{
594 lp->rssi_base_val = -91;
595
596 return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
597}
598
599static int
600at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
601{
602 int rc;
603
604 if (channel == 0)
605 rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0);
606 else
607 rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1);
608 if (rc < 0)
609 return rc;
610
611 if (page == 0) {
612 rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
613 lp->rssi_base_val = -100;
614 } else {
615 rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
616 lp->rssi_base_val = -98;
617 }
618 if (rc < 0)
619 return rc;
620
621 return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
622}
623
624static int
523at86rf230_channel(struct ieee802154_dev *dev, int page, int channel) 625at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
524{ 626{
525 struct at86rf230_local *lp = dev->priv; 627 struct at86rf230_local *lp = dev->priv;
@@ -527,14 +629,22 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
527 629
528 might_sleep(); 630 might_sleep();
529 631
530 if (page != 0 || channel < 11 || channel > 26) { 632 if (page < 0 || page > 31 ||
633 !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
531 WARN_ON(1); 634 WARN_ON(1);
532 return -EINVAL; 635 return -EINVAL;
533 } 636 }
534 637
535 rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel); 638 if (is_rf212(lp))
639 rc = at86rf212_set_channel(lp, page, channel);
640 else
641 rc = at86rf230_set_channel(lp, page, channel);
642 if (rc < 0)
643 return rc;
644
536 msleep(1); /* Wait for PLL */ 645 msleep(1); /* Wait for PLL */
537 dev->phy->current_channel = channel; 646 dev->phy->current_channel = channel;
647 dev->phy->current_page = page;
538 648
539 return 0; 649 return 0;
540} 650}
@@ -568,6 +678,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
568 if (rc) 678 if (rc)
569 goto err_rx; 679 goto err_rx;
570 680
681 if (lp->tx_aret) {
682 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
683 if (rc)
684 goto err_rx;
685 }
686
571 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX); 687 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
572 if (rc) 688 if (rc)
573 goto err_rx; 689 goto err_rx;
@@ -630,30 +746,31 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
630 struct at86rf230_local *lp = dev->priv; 746 struct at86rf230_local *lp = dev->priv;
631 747
632 if (changed & IEEE802515_AFILT_SADDR_CHANGED) { 748 if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
749 u16 addr = le16_to_cpu(filt->short_addr);
750
633 dev_vdbg(&lp->spi->dev, 751 dev_vdbg(&lp->spi->dev,
634 "at86rf230_set_hw_addr_filt called for saddr\n"); 752 "at86rf230_set_hw_addr_filt called for saddr\n");
635 __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr); 753 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
636 __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8); 754 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
637 } 755 }
638 756
639 if (changed & IEEE802515_AFILT_PANID_CHANGED) { 757 if (changed & IEEE802515_AFILT_PANID_CHANGED) {
758 u16 pan = le16_to_cpu(filt->pan_id);
759
640 dev_vdbg(&lp->spi->dev, 760 dev_vdbg(&lp->spi->dev,
641 "at86rf230_set_hw_addr_filt called for pan id\n"); 761 "at86rf230_set_hw_addr_filt called for pan id\n");
642 __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id); 762 __at86rf230_write(lp, RG_PAN_ID_0, pan);
643 __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8); 763 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
644 } 764 }
645 765
646 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { 766 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
767 u8 i, addr[8];
768
769 memcpy(addr, &filt->ieee_addr, 8);
647 dev_vdbg(&lp->spi->dev, 770 dev_vdbg(&lp->spi->dev,
648 "at86rf230_set_hw_addr_filt called for IEEE addr\n"); 771 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
649 at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]); 772 for (i = 0; i < 8; i++)
650 at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]); 773 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
651 at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
652 at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
653 at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
654 at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
655 at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
656 at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
657 } 774 }
658 775
659 if (changed & IEEE802515_AFILT_PANC_CHANGED) { 776 if (changed & IEEE802515_AFILT_PANC_CHANGED) {
@@ -668,6 +785,93 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
668 return 0; 785 return 0;
669} 786}
670 787
788static int
789at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
790{
791 struct at86rf230_local *lp = dev->priv;
792
793 /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
794 * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
795 * 0dB.
796 * thus, supported values for db range from -26 to 5, for 31dB of
797 * reduction to 0dB of reduction.
798 */
799 if (db > 5 || db < -26)
800 return -EINVAL;
801
802 db = -(db - 5);
803
804 return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
805}
806
807static int
808at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
809{
810 struct at86rf230_local *lp = dev->priv;
811
812 return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
813}
814
815static int
816at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
817{
818 struct at86rf230_local *lp = dev->priv;
819
820 return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
821}
822
823static int
824at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
825{
826 struct at86rf230_local *lp = dev->priv;
827 int desens_steps;
828
829 if (level < lp->rssi_base_val || level > 30)
830 return -EINVAL;
831
832 desens_steps = (level - lp->rssi_base_val) * 100 / 207;
833
834 return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
835}
836
837static int
838at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
839 u8 retries)
840{
841 struct at86rf230_local *lp = dev->priv;
842 int rc;
843
844 if (min_be > max_be || max_be > 8 || retries > 5)
845 return -EINVAL;
846
847 rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
848 if (rc)
849 return rc;
850
851 rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be);
852 if (rc)
853 return rc;
854
855 return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, max_be);
856}
857
858static int
859at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
860{
861 struct at86rf230_local *lp = dev->priv;
862 int rc = 0;
863
864 if (retries < -1 || retries > 15)
865 return -EINVAL;
866
867 lp->tx_aret = retries >= 0;
868
869 if (retries >= 0)
870 rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
871
872 return rc;
873}
874
671static struct ieee802154_ops at86rf230_ops = { 875static struct ieee802154_ops at86rf230_ops = {
672 .owner = THIS_MODULE, 876 .owner = THIS_MODULE,
673 .xmit = at86rf230_xmit, 877 .xmit = at86rf230_xmit,
@@ -678,6 +882,22 @@ static struct ieee802154_ops at86rf230_ops = {
678 .set_hw_addr_filt = at86rf230_set_hw_addr_filt, 882 .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
679}; 883};
680 884
885static struct ieee802154_ops at86rf212_ops = {
886 .owner = THIS_MODULE,
887 .xmit = at86rf230_xmit,
888 .ed = at86rf230_ed,
889 .set_channel = at86rf230_channel,
890 .start = at86rf230_start,
891 .stop = at86rf230_stop,
892 .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
893 .set_txpower = at86rf212_set_txpower,
894 .set_lbt = at86rf212_set_lbt,
895 .set_cca_mode = at86rf212_set_cca_mode,
896 .set_cca_ed_level = at86rf212_set_cca_ed_level,
897 .set_csma_params = at86rf212_set_csma_params,
898 .set_frame_retries = at86rf212_set_frame_retries,
899};
900
681static void at86rf230_irqwork(struct work_struct *work) 901static void at86rf230_irqwork(struct work_struct *work)
682{ 902{
683 struct at86rf230_local *lp = 903 struct at86rf230_local *lp =
@@ -695,8 +915,8 @@ static void at86rf230_irqwork(struct work_struct *work)
695 status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/ 915 status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
696 916
697 if (status & IRQ_TRX_END) { 917 if (status & IRQ_TRX_END) {
698 spin_lock_irqsave(&lp->lock, flags);
699 status &= ~IRQ_TRX_END; 918 status &= ~IRQ_TRX_END;
919 spin_lock_irqsave(&lp->lock, flags);
700 if (lp->is_tx) { 920 if (lp->is_tx) {
701 lp->is_tx = 0; 921 lp->is_tx = 0;
702 spin_unlock_irqrestore(&lp->lock, flags); 922 spin_unlock_irqrestore(&lp->lock, flags);
@@ -753,22 +973,15 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
753 struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data; 973 struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
754 int rc, irq_pol; 974 int rc, irq_pol;
755 u8 status; 975 u8 status;
976 u8 csma_seed[2];
756 977
757 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); 978 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
758 if (rc) 979 if (rc)
759 return rc; 980 return rc;
760 981
761 dev_info(&lp->spi->dev, "Status: %02x\n", status); 982 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
762 if (status == STATE_P_ON) { 983 if (rc)
763 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF); 984 return rc;
764 if (rc)
765 return rc;
766 msleep(1);
767 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
768 if (rc)
769 return rc;
770 dev_info(&lp->spi->dev, "Status: %02x\n", status);
771 }
772 985
773 /* configure irq polarity, defaults to high active */ 986 /* configure irq polarity, defaults to high active */
774 if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) 987 if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
@@ -784,6 +997,14 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
784 if (rc) 997 if (rc)
785 return rc; 998 return rc;
786 999
1000 get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
1001 rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
1002 if (rc)
1003 return rc;
1004 rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
1005 if (rc)
1006 return rc;
1007
787 /* CLKM changes are applied immediately */ 1008 /* CLKM changes are applied immediately */
788 rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00); 1009 rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
789 if (rc) 1010 if (rc)
@@ -796,16 +1017,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
796 /* Wait the next SLEEP cycle */ 1017 /* Wait the next SLEEP cycle */
797 msleep(100); 1018 msleep(100);
798 1019
799 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
800 if (rc)
801 return rc;
802 msleep(1);
803
804 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
805 if (rc)
806 return rc;
807 dev_info(&lp->spi->dev, "Status: %02x\n", status);
808
809 rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status); 1020 rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
810 if (rc) 1021 if (rc)
811 return rc; 1022 return rc;
@@ -825,14 +1036,38 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
825 return 0; 1036 return 0;
826} 1037}
827 1038
828static void at86rf230_fill_data(struct spi_device *spi) 1039static struct at86rf230_platform_data *
1040at86rf230_get_pdata(struct spi_device *spi)
829{ 1041{
830 struct at86rf230_local *lp = spi_get_drvdata(spi); 1042 struct at86rf230_platform_data *pdata;
831 struct at86rf230_platform_data *pdata = spi->dev.platform_data; 1043 const char *irq_type;
1044
1045 if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
1046 return spi->dev.platform_data;
1047
1048 pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
1049 if (!pdata)
1050 goto done;
1051
1052 pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
1053 pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
1054
1055 pdata->irq_type = IRQF_TRIGGER_RISING;
1056 of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
1057 if (!strcmp(irq_type, "level-high"))
1058 pdata->irq_type = IRQF_TRIGGER_HIGH;
1059 else if (!strcmp(irq_type, "level-low"))
1060 pdata->irq_type = IRQF_TRIGGER_LOW;
1061 else if (!strcmp(irq_type, "edge-rising"))
1062 pdata->irq_type = IRQF_TRIGGER_RISING;
1063 else if (!strcmp(irq_type, "edge-falling"))
1064 pdata->irq_type = IRQF_TRIGGER_FALLING;
1065 else
1066 dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
832 1067
833 lp->rstn = pdata->rstn; 1068 spi->dev.platform_data = pdata;
834 lp->slp_tr = pdata->slp_tr; 1069done:
835 lp->dig2 = pdata->dig2; 1070 return pdata;
836} 1071}
837 1072
838static int at86rf230_probe(struct spi_device *spi) 1073static int at86rf230_probe(struct spi_device *spi)
@@ -840,133 +1075,146 @@ static int at86rf230_probe(struct spi_device *spi)
840 struct at86rf230_platform_data *pdata; 1075 struct at86rf230_platform_data *pdata;
841 struct ieee802154_dev *dev; 1076 struct ieee802154_dev *dev;
842 struct at86rf230_local *lp; 1077 struct at86rf230_local *lp;
843 u8 man_id_0, man_id_1, status; 1078 u16 man_id = 0;
1079 u8 part = 0, version = 0, status;
844 irq_handler_t irq_handler; 1080 irq_handler_t irq_handler;
845 work_func_t irq_worker; 1081 work_func_t irq_worker;
846 int rc, supported = 0; 1082 int rc;
847 const char *chip; 1083 const char *chip;
1084 struct ieee802154_ops *ops = NULL;
848 1085
849 if (!spi->irq) { 1086 if (!spi->irq) {
850 dev_err(&spi->dev, "no IRQ specified\n"); 1087 dev_err(&spi->dev, "no IRQ specified\n");
851 return -EINVAL; 1088 return -EINVAL;
852 } 1089 }
853 1090
854 pdata = spi->dev.platform_data; 1091 pdata = at86rf230_get_pdata(spi);
855 if (!pdata) { 1092 if (!pdata) {
856 dev_err(&spi->dev, "no platform_data\n"); 1093 dev_err(&spi->dev, "no platform_data\n");
857 return -EINVAL; 1094 return -EINVAL;
858 } 1095 }
859 1096
860 dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops); 1097 if (gpio_is_valid(pdata->rstn)) {
861 if (!dev) 1098 rc = gpio_request(pdata->rstn, "rstn");
862 return -ENOMEM; 1099 if (rc)
863 1100 return rc;
864 lp = dev->priv;
865 lp->dev = dev;
866
867 lp->spi = spi;
868
869 dev->parent = &spi->dev;
870 dev->extra_tx_headroom = 0;
871 /* We do support only 2.4 Ghz */
872 dev->phy->channels_supported[0] = 0x7FFF800;
873 dev->flags = IEEE802154_HW_OMIT_CKSUM;
874
875 if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
876 irq_worker = at86rf230_irqwork;
877 irq_handler = at86rf230_isr;
878 } else {
879 irq_worker = at86rf230_irqwork_level;
880 irq_handler = at86rf230_isr_level;
881 } 1101 }
882 1102
883 mutex_init(&lp->bmux); 1103 if (gpio_is_valid(pdata->slp_tr)) {
884 INIT_WORK(&lp->irqwork, irq_worker); 1104 rc = gpio_request(pdata->slp_tr, "slp_tr");
885 spin_lock_init(&lp->lock);
886 init_completion(&lp->tx_complete);
887
888 spi_set_drvdata(spi, lp);
889
890 at86rf230_fill_data(spi);
891
892 rc = gpio_request(lp->rstn, "rstn");
893 if (rc)
894 goto err_rstn;
895
896 if (gpio_is_valid(lp->slp_tr)) {
897 rc = gpio_request(lp->slp_tr, "slp_tr");
898 if (rc) 1105 if (rc)
899 goto err_slp_tr; 1106 goto err_slp_tr;
900 } 1107 }
901 1108
902 rc = gpio_direction_output(lp->rstn, 1); 1109 if (gpio_is_valid(pdata->rstn)) {
903 if (rc) 1110 rc = gpio_direction_output(pdata->rstn, 1);
904 goto err_gpio_dir; 1111 if (rc)
1112 goto err_gpio_dir;
1113 }
905 1114
906 if (gpio_is_valid(lp->slp_tr)) { 1115 if (gpio_is_valid(pdata->slp_tr)) {
907 rc = gpio_direction_output(lp->slp_tr, 0); 1116 rc = gpio_direction_output(pdata->slp_tr, 0);
908 if (rc) 1117 if (rc)
909 goto err_gpio_dir; 1118 goto err_gpio_dir;
910 } 1119 }
911 1120
912 /* Reset */ 1121 /* Reset */
913 msleep(1); 1122 if (gpio_is_valid(pdata->rstn)) {
914 gpio_set_value(lp->rstn, 0); 1123 udelay(1);
915 msleep(1); 1124 gpio_set_value(pdata->rstn, 0);
916 gpio_set_value(lp->rstn, 1); 1125 udelay(1);
917 msleep(1); 1126 gpio_set_value(pdata->rstn, 1);
1127 usleep_range(120, 240);
1128 }
918 1129
919 rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0); 1130 rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
920 if (rc) 1131 if (rc < 0)
921 goto err_gpio_dir;
922 rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
923 if (rc)
924 goto err_gpio_dir; 1132 goto err_gpio_dir;
925 1133
926 if (man_id_1 != 0x00 || man_id_0 != 0x1f) { 1134 if (man_id != 0x001f) {
927 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n", 1135 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
928 man_id_1, man_id_0); 1136 man_id >> 8, man_id & 0xFF);
929 rc = -EINVAL; 1137 rc = -EINVAL;
930 goto err_gpio_dir; 1138 goto err_gpio_dir;
931 } 1139 }
932 1140
933 rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part); 1141 switch (part) {
934 if (rc)
935 goto err_gpio_dir;
936
937 rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
938 if (rc)
939 goto err_gpio_dir;
940
941 switch (lp->part) {
942 case 2: 1142 case 2:
943 chip = "at86rf230"; 1143 chip = "at86rf230";
944 /* supported = 1; FIXME: should be easy to support; */ 1144 /* FIXME: should be easy to support; */
945 break; 1145 break;
946 case 3: 1146 case 3:
947 chip = "at86rf231"; 1147 chip = "at86rf231";
948 supported = 1; 1148 ops = &at86rf230_ops;
1149 break;
1150 case 7:
1151 chip = "at86rf212";
1152 if (version == 1)
1153 ops = &at86rf212_ops;
1154 break;
1155 case 11:
1156 chip = "at86rf233";
1157 ops = &at86rf230_ops;
949 break; 1158 break;
950 default: 1159 default:
951 chip = "UNKNOWN"; 1160 chip = "UNKNOWN";
952 break; 1161 break;
953 } 1162 }
954 1163
955 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers); 1164 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
956 if (!supported) { 1165 if (!ops) {
957 rc = -ENOTSUPP; 1166 rc = -ENOTSUPP;
958 goto err_gpio_dir; 1167 goto err_gpio_dir;
959 } 1168 }
960 1169
1170 dev = ieee802154_alloc_device(sizeof(*lp), ops);
1171 if (!dev) {
1172 rc = -ENOMEM;
1173 goto err_gpio_dir;
1174 }
1175
1176 lp = dev->priv;
1177 lp->dev = dev;
1178 lp->part = part;
1179 lp->vers = version;
1180
1181 lp->spi = spi;
1182
1183 dev->parent = &spi->dev;
1184 dev->extra_tx_headroom = 0;
1185 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
1186
1187 if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
1188 irq_worker = at86rf230_irqwork;
1189 irq_handler = at86rf230_isr;
1190 } else {
1191 irq_worker = at86rf230_irqwork_level;
1192 irq_handler = at86rf230_isr_level;
1193 }
1194
1195 mutex_init(&lp->bmux);
1196 INIT_WORK(&lp->irqwork, irq_worker);
1197 spin_lock_init(&lp->lock);
1198 init_completion(&lp->tx_complete);
1199
1200 spi_set_drvdata(spi, lp);
1201
1202 if (is_rf212(lp)) {
1203 dev->phy->channels_supported[0] = 0x00007FF;
1204 dev->phy->channels_supported[2] = 0x00007FF;
1205 } else {
1206 dev->phy->channels_supported[0] = 0x7FFF800;
1207 }
1208
961 rc = at86rf230_hw_init(lp); 1209 rc = at86rf230_hw_init(lp);
962 if (rc) 1210 if (rc)
963 goto err_gpio_dir; 1211 goto err_hw_init;
964 1212
965 rc = request_irq(spi->irq, irq_handler, 1213 rc = request_irq(spi->irq, irq_handler,
966 IRQF_SHARED | pdata->irq_type, 1214 IRQF_SHARED | pdata->irq_type,
967 dev_name(&spi->dev), lp); 1215 dev_name(&spi->dev), lp);
968 if (rc) 1216 if (rc)
969 goto err_gpio_dir; 1217 goto err_hw_init;
970 1218
971 /* Read irq status register to reset irq line */ 1219 /* Read irq status register to reset irq line */
972 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status); 1220 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
@@ -981,30 +1229,37 @@ static int at86rf230_probe(struct spi_device *spi)
981 1229
982err_irq: 1230err_irq:
983 free_irq(spi->irq, lp); 1231 free_irq(spi->irq, lp);
1232err_hw_init:
984 flush_work(&lp->irqwork); 1233 flush_work(&lp->irqwork);
985err_gpio_dir: 1234 spi_set_drvdata(spi, NULL);
986 if (gpio_is_valid(lp->slp_tr))
987 gpio_free(lp->slp_tr);
988err_slp_tr:
989 gpio_free(lp->rstn);
990err_rstn:
991 mutex_destroy(&lp->bmux); 1235 mutex_destroy(&lp->bmux);
992 ieee802154_free_device(lp->dev); 1236 ieee802154_free_device(lp->dev);
1237
1238err_gpio_dir:
1239 if (gpio_is_valid(pdata->slp_tr))
1240 gpio_free(pdata->slp_tr);
1241err_slp_tr:
1242 if (gpio_is_valid(pdata->rstn))
1243 gpio_free(pdata->rstn);
993 return rc; 1244 return rc;
994} 1245}
995 1246
996static int at86rf230_remove(struct spi_device *spi) 1247static int at86rf230_remove(struct spi_device *spi)
997{ 1248{
998 struct at86rf230_local *lp = spi_get_drvdata(spi); 1249 struct at86rf230_local *lp = spi_get_drvdata(spi);
1250 struct at86rf230_platform_data *pdata = spi->dev.platform_data;
999 1251
1252 /* mask all at86rf230 irq's */
1253 at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
1000 ieee802154_unregister_device(lp->dev); 1254 ieee802154_unregister_device(lp->dev);
1001 1255
1002 free_irq(spi->irq, lp); 1256 free_irq(spi->irq, lp);
1003 flush_work(&lp->irqwork); 1257 flush_work(&lp->irqwork);
1004 1258
1005 if (gpio_is_valid(lp->slp_tr)) 1259 if (gpio_is_valid(pdata->slp_tr))
1006 gpio_free(lp->slp_tr); 1260 gpio_free(pdata->slp_tr);
1007 gpio_free(lp->rstn); 1261 if (gpio_is_valid(pdata->rstn))
1262 gpio_free(pdata->rstn);
1008 1263
1009 mutex_destroy(&lp->bmux); 1264 mutex_destroy(&lp->bmux);
1010 ieee802154_free_device(lp->dev); 1265 ieee802154_free_device(lp->dev);
@@ -1013,8 +1268,19 @@ static int at86rf230_remove(struct spi_device *spi)
1013 return 0; 1268 return 0;
1014} 1269}
1015 1270
1271#if IS_ENABLED(CONFIG_OF)
1272static struct of_device_id at86rf230_of_match[] = {
1273 { .compatible = "atmel,at86rf230", },
1274 { .compatible = "atmel,at86rf231", },
1275 { .compatible = "atmel,at86rf233", },
1276 { .compatible = "atmel,at86rf212", },
1277 { },
1278};
1279#endif
1280
1016static struct spi_driver at86rf230_driver = { 1281static struct spi_driver at86rf230_driver = {
1017 .driver = { 1282 .driver = {
1283 .of_match_table = of_match_ptr(at86rf230_of_match),
1018 .name = "at86rf230", 1284 .name = "at86rf230",
1019 .owner = THIS_MODULE, 1285 .owner = THIS_MODULE,
1020 }, 1286 },
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index bf0d55e2dd63..78f18be3bbf2 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -63,11 +63,11 @@ static struct wpan_phy *fake_get_phy(const struct net_device *dev)
63 * 63 *
64 * Return the ID of the PAN from the PIB. 64 * Return the ID of the PAN from the PIB.
65 */ 65 */
66static u16 fake_get_pan_id(const struct net_device *dev) 66static __le16 fake_get_pan_id(const struct net_device *dev)
67{ 67{
68 BUG_ON(dev->type != ARPHRD_IEEE802154); 68 BUG_ON(dev->type != ARPHRD_IEEE802154);
69 69
70 return 0xeba1; 70 return cpu_to_le16(0xeba1);
71} 71}
72 72
73/** 73/**
@@ -78,11 +78,11 @@ static u16 fake_get_pan_id(const struct net_device *dev)
78 * device. If the device has not yet had a short address assigned 78 * device. If the device has not yet had a short address assigned
79 * then this should return 0xFFFF to indicate a lack of association. 79 * then this should return 0xFFFF to indicate a lack of association.
80 */ 80 */
81static u16 fake_get_short_addr(const struct net_device *dev) 81static __le16 fake_get_short_addr(const struct net_device *dev)
82{ 82{
83 BUG_ON(dev->type != ARPHRD_IEEE802154); 83 BUG_ON(dev->type != ARPHRD_IEEE802154);
84 84
85 return 0x1; 85 return cpu_to_le16(0x1);
86} 86}
87 87
88/** 88/**
@@ -149,7 +149,7 @@ static int fake_assoc_req(struct net_device *dev,
149 * 802.15.4-2006 document. 149 * 802.15.4-2006 document.
150 */ 150 */
151static int fake_assoc_resp(struct net_device *dev, 151static int fake_assoc_resp(struct net_device *dev,
152 struct ieee802154_addr *addr, u16 short_addr, u8 status) 152 struct ieee802154_addr *addr, __le16 short_addr, u8 status)
153{ 153{
154 return 0; 154 return 0;
155} 155}
@@ -191,10 +191,10 @@ static int fake_disassoc_req(struct net_device *dev,
191 * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006 191 * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
192 * document, with 7.3.8 describing coordinator realignment. 192 * document, with 7.3.8 describing coordinator realignment.
193 */ 193 */
194static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr, 194static int fake_start_req(struct net_device *dev,
195 u8 channel, u8 page, 195 struct ieee802154_addr *addr, u8 channel, u8 page,
196 u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, 196 u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
197 u8 coord_realign) 197 u8 coord_realign)
198{ 198{
199 struct wpan_phy *phy = fake_to_phy(dev); 199 struct wpan_phy *phy = fake_to_phy(dev);
200 200
@@ -281,8 +281,8 @@ static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
281 switch (cmd) { 281 switch (cmd) {
282 case SIOCGIFADDR: 282 case SIOCGIFADDR:
283 /* FIXME: fixed here, get from device IRL */ 283 /* FIXME: fixed here, get from device IRL */
284 pan_id = fake_get_pan_id(dev); 284 pan_id = le16_to_cpu(fake_get_pan_id(dev));
285 short_addr = fake_get_short_addr(dev); 285 short_addr = le16_to_cpu(fake_get_short_addr(dev));
286 if (pan_id == IEEE802154_PANID_BROADCAST || 286 if (pan_id == IEEE802154_PANID_BROADCAST ||
287 short_addr == IEEE802154_ADDR_BROADCAST) 287 short_addr == IEEE802154_ADDR_BROADCAST)
288 return -EADDRNOTAVAIL; 288 return -EADDRNOTAVAIL;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 246befa4ba05..78a6552ed707 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -465,8 +465,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
465 if (changed & IEEE802515_AFILT_SADDR_CHANGED) { 465 if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
466 /* Short Addr */ 466 /* Short Addr */
467 u8 addrh, addrl; 467 u8 addrh, addrl;
468 addrh = filt->short_addr >> 8 & 0xff; 468 addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff;
469 addrl = filt->short_addr & 0xff; 469 addrl = le16_to_cpu(filt->short_addr) & 0xff;
470 470
471 write_short_reg(devrec, REG_SADRH, addrh); 471 write_short_reg(devrec, REG_SADRH, addrh);
472 write_short_reg(devrec, REG_SADRL, addrl); 472 write_short_reg(devrec, REG_SADRL, addrl);
@@ -476,15 +476,16 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
476 476
477 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { 477 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
478 /* Device Address */ 478 /* Device Address */
479 int i; 479 u8 i, addr[8];
480
481 memcpy(addr, &filt->ieee_addr, 8);
480 for (i = 0; i < 8; i++) 482 for (i = 0; i < 8; i++)
481 write_short_reg(devrec, REG_EADR0+i, 483 write_short_reg(devrec, REG_EADR0 + i, addr[i]);
482 filt->ieee_addr[7-i]);
483 484
484#ifdef DEBUG 485#ifdef DEBUG
485 printk(KERN_DEBUG "Set long addr to: "); 486 printk(KERN_DEBUG "Set long addr to: ");
486 for (i = 0; i < 8; i++) 487 for (i = 0; i < 8; i++)
487 printk("%02hhx ", filt->ieee_addr[i]); 488 printk("%02hhx ", addr[7 - i]);
488 printk(KERN_DEBUG "\n"); 489 printk(KERN_DEBUG "\n");
489#endif 490#endif
490 } 491 }
@@ -492,8 +493,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
492 if (changed & IEEE802515_AFILT_PANID_CHANGED) { 493 if (changed & IEEE802515_AFILT_PANID_CHANGED) {
493 /* PAN ID */ 494 /* PAN ID */
494 u8 panidl, panidh; 495 u8 panidl, panidh;
495 panidh = filt->pan_id >> 8 & 0xff; 496 panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff;
496 panidl = filt->pan_id & 0xff; 497 panidl = le16_to_cpu(filt->pan_id) & 0xff;
497 write_short_reg(devrec, REG_PANIDH, panidh); 498 write_short_reg(devrec, REG_PANIDH, panidh);
498 write_short_reg(devrec, REG_PANIDL, panidl); 499 write_short_reg(devrec, REG_PANIDL, panidl);
499 500
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index d7b2e947184b..46a7790be004 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,18 +136,18 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
136 unsigned int start; 136 unsigned int start;
137 137
138 do { 138 do {
139 start = u64_stats_fetch_begin_bh(&dp->rsync); 139 start = u64_stats_fetch_begin_irq(&dp->rsync);
140 stats->rx_packets = dp->rx_packets; 140 stats->rx_packets = dp->rx_packets;
141 stats->rx_bytes = dp->rx_bytes; 141 stats->rx_bytes = dp->rx_bytes;
142 } while (u64_stats_fetch_retry_bh(&dp->rsync, start)); 142 } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
143 143
144 do { 144 do {
145 start = u64_stats_fetch_begin_bh(&dp->tsync); 145 start = u64_stats_fetch_begin_irq(&dp->tsync);
146 146
147 stats->tx_packets = dp->tx_packets; 147 stats->tx_packets = dp->tx_packets;
148 stats->tx_bytes = dp->tx_bytes; 148 stats->tx_bytes = dp->tx_bytes;
149 149
150 } while (u64_stats_fetch_retry_bh(&dp->tsync, start)); 150 } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
151 151
152 stats->rx_dropped = dev->stats.rx_dropped; 152 stats->rx_dropped = dev->stats.rx_dropped;
153 stats->tx_dropped = dev->stats.tx_dropped; 153 stats->tx_dropped = dev->stats.tx_dropped;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c5011e078e1b..bb96409f8c05 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -111,10 +111,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
111 111
112 lb_stats = per_cpu_ptr(dev->lstats, i); 112 lb_stats = per_cpu_ptr(dev->lstats, i);
113 do { 113 do {
114 start = u64_stats_fetch_begin_bh(&lb_stats->syncp); 114 start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
115 tbytes = lb_stats->bytes; 115 tbytes = lb_stats->bytes;
116 tpackets = lb_stats->packets; 116 tpackets = lb_stats->packets;
117 } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start)); 117 } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
118 bytes += tbytes; 118 bytes += tbytes;
119 packets += tpackets; 119 packets += tpackets;
120 } 120 }
@@ -136,16 +136,9 @@ static const struct ethtool_ops loopback_ethtool_ops = {
136 136
137static int loopback_dev_init(struct net_device *dev) 137static int loopback_dev_init(struct net_device *dev)
138{ 138{
139 int i; 139 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
140 dev->lstats = alloc_percpu(struct pcpu_lstats);
141 if (!dev->lstats) 140 if (!dev->lstats)
142 return -ENOMEM; 141 return -ENOMEM;
143
144 for_each_possible_cpu(i) {
145 struct pcpu_lstats *lb_stats;
146 lb_stats = per_cpu_ptr(dev->lstats, i);
147 u64_stats_init(&lb_stats->syncp);
148 }
149 return 0; 142 return 0;
150} 143}
151 144
@@ -160,6 +153,7 @@ static const struct net_device_ops loopback_ops = {
160 .ndo_init = loopback_dev_init, 153 .ndo_init = loopback_dev_init,
161 .ndo_start_xmit= loopback_xmit, 154 .ndo_start_xmit= loopback_xmit,
162 .ndo_get_stats64 = loopback_get_stats64, 155 .ndo_get_stats64 = loopback_get_stats64,
156 .ndo_set_mac_address = eth_mac_addr,
163}; 157};
164 158
165/* 159/*
@@ -174,6 +168,7 @@ static void loopback_setup(struct net_device *dev)
174 dev->tx_queue_len = 0; 168 dev->tx_queue_len = 0;
175 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
176 dev->flags = IFF_LOOPBACK; 170 dev->flags = IFF_LOOPBACK;
171 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
177 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 172 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
178 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; 173 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
179 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 174 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -181,6 +176,7 @@ static void loopback_setup(struct net_device *dev)
181 | NETIF_F_UFO 176 | NETIF_F_UFO
182 | NETIF_F_HW_CSUM 177 | NETIF_F_HW_CSUM
183 | NETIF_F_RXCSUM 178 | NETIF_F_RXCSUM
179 | NETIF_F_SCTP_CSUM
184 | NETIF_F_HIGHDMA 180 | NETIF_F_HIGHDMA
185 | NETIF_F_LLTX 181 | NETIF_F_LLTX
186 | NETIF_F_NETNS_LOCAL 182 | NETIF_F_NETNS_LOCAL
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1831fb7cd017..753a8c23d15d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -537,7 +537,6 @@ static int macvlan_init(struct net_device *dev)
537{ 537{
538 struct macvlan_dev *vlan = netdev_priv(dev); 538 struct macvlan_dev *vlan = netdev_priv(dev);
539 const struct net_device *lowerdev = vlan->lowerdev; 539 const struct net_device *lowerdev = vlan->lowerdev;
540 int i;
541 540
542 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 541 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
543 (lowerdev->state & MACVLAN_STATE_MASK); 542 (lowerdev->state & MACVLAN_STATE_MASK);
@@ -549,16 +548,10 @@ static int macvlan_init(struct net_device *dev)
549 548
550 macvlan_set_lockdep_class(dev); 549 macvlan_set_lockdep_class(dev);
551 550
552 vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats); 551 vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
553 if (!vlan->pcpu_stats) 552 if (!vlan->pcpu_stats)
554 return -ENOMEM; 553 return -ENOMEM;
555 554
556 for_each_possible_cpu(i) {
557 struct vlan_pcpu_stats *mvlstats;
558 mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
559 u64_stats_init(&mvlstats->syncp);
560 }
561
562 return 0; 555 return 0;
563} 556}
564 557
@@ -589,13 +582,13 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
589 for_each_possible_cpu(i) { 582 for_each_possible_cpu(i) {
590 p = per_cpu_ptr(vlan->pcpu_stats, i); 583 p = per_cpu_ptr(vlan->pcpu_stats, i);
591 do { 584 do {
592 start = u64_stats_fetch_begin_bh(&p->syncp); 585 start = u64_stats_fetch_begin_irq(&p->syncp);
593 rx_packets = p->rx_packets; 586 rx_packets = p->rx_packets;
594 rx_bytes = p->rx_bytes; 587 rx_bytes = p->rx_bytes;
595 rx_multicast = p->rx_multicast; 588 rx_multicast = p->rx_multicast;
596 tx_packets = p->tx_packets; 589 tx_packets = p->tx_packets;
597 tx_bytes = p->tx_bytes; 590 tx_bytes = p->tx_bytes;
598 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 591 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
599 592
600 stats->rx_packets += rx_packets; 593 stats->rx_packets += rx_packets;
601 stats->rx_bytes += rx_bytes; 594 stats->rx_bytes += rx_bytes;
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index d2bb12bfabd5..34924dfadd00 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,16 +47,7 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
47 47
48static int nlmon_dev_init(struct net_device *dev) 48static int nlmon_dev_init(struct net_device *dev)
49{ 49{
50 int i; 50 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
51
52 dev->lstats = alloc_percpu(struct pcpu_lstats);
53
54 for_each_possible_cpu(i) {
55 struct pcpu_lstats *nlmstats;
56 nlmstats = per_cpu_ptr(dev->lstats, i);
57 u64_stats_init(&nlmstats->syncp);
58 }
59
60 return dev->lstats == NULL ? -ENOMEM : 0; 51 return dev->lstats == NULL ? -ENOMEM : 0;
61} 52}
62 53
@@ -99,10 +90,10 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
99 nl_stats = per_cpu_ptr(dev->lstats, i); 90 nl_stats = per_cpu_ptr(dev->lstats, i);
100 91
101 do { 92 do {
102 start = u64_stats_fetch_begin_bh(&nl_stats->syncp); 93 start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
103 tbytes = nl_stats->bytes; 94 tbytes = nl_stats->bytes;
104 tpackets = nl_stats->packets; 95 tpackets = nl_stats->packets;
105 } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start)); 96 } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
106 97
107 packets += tpackets; 98 packets += tpackets;
108 bytes += tbytes; 99 bytes += tbytes;
@@ -145,7 +136,8 @@ static void nlmon_setup(struct net_device *dev)
145 dev->ethtool_ops = &nlmon_ethtool_ops; 136 dev->ethtool_ops = &nlmon_ethtool_ops;
146 dev->destructor = free_netdev; 137 dev->destructor = free_netdev;
147 138
148 dev->features = NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; 139 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
140 NETIF_F_HIGHDMA | NETIF_F_LLTX;
149 dev->flags = IFF_NOARP; 141 dev->flags = IFF_NOARP;
150 142
151 /* That's rather a softlimit here, which, of course, 143 /* That's rather a softlimit here, which, of course,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9b5d46c03eed..6a17f92153b3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -71,6 +71,12 @@ config BCM63XX_PHY
71 ---help--- 71 ---help---
72 Currently supports the 6348 and 6358 PHYs. 72 Currently supports the 6348 and 6358 PHYs.
73 73
74config BCM7XXX_PHY
75 tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
76 ---help---
77 Currently supports the BCM7366, BCM7439, BCM7445, and
78 40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
79
74config BCM87XX_PHY 80config BCM87XX_PHY
75 tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs" 81 tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
76 help 82 help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 9013dfa12aa3..07d24024863e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o 13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
14obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o 14obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
15obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
15obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o 16obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
16obj-$(CONFIG_ICPLUS_PHY) += icplus.o 17obj-$(CONFIG_ICPLUS_PHY) += icplus.o
17obj-$(CONFIG_REALTEK_PHY) += realtek.o 18obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index bc71947b1ec3..643464d5a727 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -27,6 +27,9 @@
27#define AT803X_MMD_ACCESS_CONTROL 0x0D 27#define AT803X_MMD_ACCESS_CONTROL 0x0D
28#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E 28#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
29#define AT803X_FUNC_DATA 0x4003 29#define AT803X_FUNC_DATA 0x4003
30#define AT803X_INER 0x0012
31#define AT803X_INER_INIT 0xec00
32#define AT803X_INSR 0x0013
30#define AT803X_DEBUG_ADDR 0x1D 33#define AT803X_DEBUG_ADDR 0x1D
31#define AT803X_DEBUG_DATA 0x1E 34#define AT803X_DEBUG_DATA 0x1E
32#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05 35#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
@@ -191,6 +194,31 @@ static int at803x_config_init(struct phy_device *phydev)
191 return 0; 194 return 0;
192} 195}
193 196
197static int at803x_ack_interrupt(struct phy_device *phydev)
198{
199 int err;
200
201 err = phy_read(phydev, AT803X_INSR);
202
203 return (err < 0) ? err : 0;
204}
205
206static int at803x_config_intr(struct phy_device *phydev)
207{
208 int err;
209 int value;
210
211 value = phy_read(phydev, AT803X_INER);
212
213 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
214 err = phy_write(phydev, AT803X_INER,
215 value | AT803X_INER_INIT);
216 else
217 err = phy_write(phydev, AT803X_INER, 0);
218
219 return err;
220}
221
194static struct phy_driver at803x_driver[] = { 222static struct phy_driver at803x_driver[] = {
195{ 223{
196 /* ATHEROS 8035 */ 224 /* ATHEROS 8035 */
@@ -240,6 +268,8 @@ static struct phy_driver at803x_driver[] = {
240 .flags = PHY_HAS_INTERRUPT, 268 .flags = PHY_HAS_INTERRUPT,
241 .config_aneg = genphy_config_aneg, 269 .config_aneg = genphy_config_aneg,
242 .read_status = genphy_read_status, 270 .read_status = genphy_read_status,
271 .ack_interrupt = &at803x_ack_interrupt,
272 .config_intr = &at803x_config_intr,
243 .driver = { 273 .driver = {
244 .owner = THIS_MODULE, 274 .owner = THIS_MODULE,
245 }, 275 },
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
new file mode 100644
index 000000000000..526b94cea569
--- /dev/null
+++ b/drivers/net/phy/bcm7xxx.c
@@ -0,0 +1,359 @@
1/*
2 * Broadcom BCM7xxx internal transceivers support.
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/phy.h>
14#include <linux/delay.h>
15#include <linux/bitops.h>
16#include <linux/brcmphy.h>
17
18/* Broadcom BCM7xxx internal PHY registers */
19#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000
20
21/* 40nm only register definitions */
22#define MII_BCM7XXX_100TX_AUX_CTL 0x10
23#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
24#define MII_BCM7XXX_100TX_DISC 0x14
25#define MII_BCM7XXX_AUX_MODE 0x1d
26#define MII_BCM7XX_64CLK_MDIO BIT(12)
27#define MII_BCM7XXX_CORE_BASE1E 0x1e
28#define MII_BCM7XXX_TEST 0x1f
29#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
30
31/* 28nm only register definitions */
32#define MISC_ADDR(base, channel) base, channel
33
34#define DSP_TAP10 MISC_ADDR(0x0a, 0)
35#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
36#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
37#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
38
39#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
40#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
41#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
42#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
43#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
44
45#define CORE_EXPB0 0xb0
46
47static int bcm7445_config_init(struct phy_device *phydev)
48{
49 int ret;
50 const struct bcm7445_regs {
51 int reg;
52 u16 value;
53 } bcm7445_regs_cfg[] = {
54 /* increases ADC latency by 24ns */
55 { MII_BCM54XX_EXP_SEL, 0x0038 },
56 { MII_BCM54XX_EXP_DATA, 0xAB95 },
57 /* increases internal 1V LDO voltage by 5% */
58 { MII_BCM54XX_EXP_SEL, 0x2038 },
59 { MII_BCM54XX_EXP_DATA, 0xBB22 },
60 /* reduce RX low pass filter corner frequency */
61 { MII_BCM54XX_EXP_SEL, 0x6038 },
62 { MII_BCM54XX_EXP_DATA, 0xFFC5 },
63 /* reduce RX high pass filter corner frequency */
64 { MII_BCM54XX_EXP_SEL, 0x003a },
65 { MII_BCM54XX_EXP_DATA, 0x2002 },
66 };
67 unsigned int i;
68
69 for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
70 ret = phy_write(phydev,
71 bcm7445_regs_cfg[i].reg,
72 bcm7445_regs_cfg[i].value);
73 if (ret)
74 return ret;
75 }
76
77 return 0;
78}
79
80static void phy_write_exp(struct phy_device *phydev,
81 u16 reg, u16 value)
82{
83 phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
84 phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
85}
86
87static void phy_write_misc(struct phy_device *phydev,
88 u16 reg, u16 chl, u16 value)
89{
90 int tmp;
91
92 phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
93
94 tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
95 tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
96 phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
97
98 tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
99 phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
100
101 phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
102}
103
104static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
105{
106 /* Increase VCO range to prevent unlocking problem of PLL at low
107 * temp
108 */
109 phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
110
111 /* Change Ki to 011 */
112 phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
113
114 /* Disable loading of TVCO buffer to bandgap, set bandgap trim
115 * to 111
116 */
117 phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
118
119 /* Adjust bias current trim by -3 */
120 phy_write_misc(phydev, DSP_TAP10, 0x690b);
121
122 /* Switch to CORE_BASE1E */
123 phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
124
125 /* Reset R_CAL/RC_CAL Engine */
126 phy_write_exp(phydev, CORE_EXPB0, 0x0010);
127
128 /* Disable Reset R_CAL/RC_CAL Engine */
129 phy_write_exp(phydev, CORE_EXPB0, 0x0000);
130
131 /* write AFE_RXCONFIG_0 */
132 phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
133
134 /* write AFE_RXCONFIG_1 */
135 phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
136
137 /* write AFE_RX_LP_COUNTER */
138 phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
139
140 /* write AFE_HPF_TRIM_OTHERS */
141 phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
142
143 /* write AFTE_TX_CONFIG */
144 phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
145
146 return 0;
147}
148
149static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
150{
151 int ret;
152
153 ret = bcm7445_config_init(phydev);
154 if (ret)
155 return ret;
156
157 return bcm7xxx_28nm_afe_config_init(phydev);
158}
159
160static int phy_set_clr_bits(struct phy_device *dev, int location,
161 int set_mask, int clr_mask)
162{
163 int v, ret;
164
165 v = phy_read(dev, location);
166 if (v < 0)
167 return v;
168
169 v &= ~clr_mask;
170 v |= set_mask;
171
172 ret = phy_write(dev, location, v);
173 if (ret < 0)
174 return ret;
175
176 return v;
177}
178
179static int bcm7xxx_config_init(struct phy_device *phydev)
180{
181 int ret;
182
183 /* Enable 64 clock MDIO */
184 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
185 phy_read(phydev, MII_BCM7XXX_AUX_MODE);
186
187 /* Workaround only required for 100Mbits/sec */
188 if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR))
189 return 0;
190
191 /* set shadow mode 2 */
192 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
193 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
194 if (ret < 0)
195 return ret;
196
197 /* set iddq_clkbias */
198 phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0F00);
199 udelay(10);
200
201 /* reset iddq_clkbias */
202 phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0C00);
203
204 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
205
206 /* reset shadow mode 2 */
207 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
208 if (ret < 0)
209 return ret;
210
211 return 0;
212}
213
214/* Workaround for putting the PHY in IDDQ mode, required
215 * for all BCM7XXX PHYs
216 */
217static int bcm7xxx_suspend(struct phy_device *phydev)
218{
219 int ret;
220 const struct bcm7xxx_regs {
221 int reg;
222 u16 value;
223 } bcm7xxx_suspend_cfg[] = {
224 { MII_BCM7XXX_TEST, 0x008b },
225 { MII_BCM7XXX_100TX_AUX_CTL, 0x01c0 },
226 { MII_BCM7XXX_100TX_DISC, 0x7000 },
227 { MII_BCM7XXX_TEST, 0x000f },
228 { MII_BCM7XXX_100TX_AUX_CTL, 0x20d0 },
229 { MII_BCM7XXX_TEST, 0x000b },
230 };
231 unsigned int i;
232
233 for (i = 0; i < ARRAY_SIZE(bcm7xxx_suspend_cfg); i++) {
234 ret = phy_write(phydev,
235 bcm7xxx_suspend_cfg[i].reg,
236 bcm7xxx_suspend_cfg[i].value);
237 if (ret)
238 return ret;
239 }
240
241 return 0;
242}
243
244static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
245{
246 return 0;
247}
248
249static struct phy_driver bcm7xxx_driver[] = {
250{
251 .phy_id = PHY_ID_BCM7366,
252 .phy_id_mask = 0xfffffff0,
253 .name = "Broadcom BCM7366",
254 .features = PHY_GBIT_FEATURES |
255 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
256 .flags = PHY_IS_INTERNAL,
257 .config_init = bcm7xxx_28nm_afe_config_init,
258 .config_aneg = genphy_config_aneg,
259 .read_status = genphy_read_status,
260 .suspend = bcm7xxx_suspend,
261 .resume = bcm7xxx_28nm_afe_config_init,
262 .driver = { .owner = THIS_MODULE },
263}, {
264 .phy_id = PHY_ID_BCM7439,
265 .phy_id_mask = 0xfffffff0,
266 .name = "Broadcom BCM7439",
267 .features = PHY_GBIT_FEATURES |
268 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
269 .flags = PHY_IS_INTERNAL,
270 .config_init = bcm7xxx_28nm_afe_config_init,
271 .config_aneg = genphy_config_aneg,
272 .read_status = genphy_read_status,
273 .suspend = bcm7xxx_suspend,
274 .resume = bcm7xxx_28nm_afe_config_init,
275 .driver = { .owner = THIS_MODULE },
276}, {
277 .phy_id = PHY_ID_BCM7445,
278 .phy_id_mask = 0xfffffff0,
279 .name = "Broadcom BCM7445",
280 .features = PHY_GBIT_FEATURES |
281 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
282 .flags = PHY_IS_INTERNAL,
283 .config_init = bcm7xxx_28nm_config_init,
284 .config_aneg = genphy_config_aneg,
285 .read_status = genphy_read_status,
286 .suspend = bcm7xxx_suspend,
287 .resume = bcm7xxx_28nm_config_init,
288 .driver = { .owner = THIS_MODULE },
289}, {
290 .name = "Broadcom BCM7XXX 28nm",
291 .phy_id = PHY_ID_BCM7XXX_28,
292 .phy_id_mask = PHY_BCM_OUI_MASK,
293 .features = PHY_GBIT_FEATURES |
294 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
295 .flags = PHY_IS_INTERNAL,
296 .config_init = bcm7xxx_28nm_config_init,
297 .config_aneg = genphy_config_aneg,
298 .read_status = genphy_read_status,
299 .suspend = bcm7xxx_suspend,
300 .resume = bcm7xxx_28nm_config_init,
301 .driver = { .owner = THIS_MODULE },
302}, {
303 .phy_id = PHY_BCM_OUI_4,
304 .phy_id_mask = 0xffff0000,
305 .name = "Broadcom BCM7XXX 40nm",
306 .features = PHY_GBIT_FEATURES |
307 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
308 .flags = PHY_IS_INTERNAL,
309 .config_init = bcm7xxx_config_init,
310 .config_aneg = genphy_config_aneg,
311 .read_status = genphy_read_status,
312 .suspend = bcm7xxx_suspend,
313 .resume = bcm7xxx_config_init,
314 .driver = { .owner = THIS_MODULE },
315}, {
316 .phy_id = PHY_BCM_OUI_5,
317 .phy_id_mask = 0xffffff00,
318 .name = "Broadcom BCM7XXX 65nm",
319 .features = PHY_BASIC_FEATURES |
320 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
321 .flags = PHY_IS_INTERNAL,
322 .config_init = bcm7xxx_dummy_config_init,
323 .config_aneg = genphy_config_aneg,
324 .read_status = genphy_read_status,
325 .suspend = bcm7xxx_suspend,
326 .resume = bcm7xxx_config_init,
327 .driver = { .owner = THIS_MODULE },
328} };
329
330static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
331 { PHY_ID_BCM7366, 0xfffffff0, },
332 { PHY_ID_BCM7439, 0xfffffff0, },
333 { PHY_ID_BCM7445, 0xfffffff0, },
334 { PHY_ID_BCM7XXX_28, 0xfffffc00 },
335 { PHY_BCM_OUI_4, 0xffff0000 },
336 { PHY_BCM_OUI_5, 0xffffff00 },
337 { }
338};
339
340static int __init bcm7xxx_phy_init(void)
341{
342 return phy_drivers_register(bcm7xxx_driver,
343 ARRAY_SIZE(bcm7xxx_driver));
344}
345
346static void __exit bcm7xxx_phy_exit(void)
347{
348 phy_drivers_unregister(bcm7xxx_driver,
349 ARRAY_SIZE(bcm7xxx_driver));
350}
351
352module_init(bcm7xxx_phy_init);
353module_exit(bcm7xxx_phy_exit);
354
355MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
356
357MODULE_DESCRIPTION("Broadcom BCM7xxx internal PHY driver");
358MODULE_LICENSE("GPL");
359MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f8c90ea75108..34088d60da74 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -25,58 +25,6 @@
25#define BRCM_PHY_REV(phydev) \ 25#define BRCM_PHY_REV(phydev) \
26 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask)) 26 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
27 27
28
29#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
30#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
31#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
32
33#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
34#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
35
36#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
37#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
38#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
39#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
40
41#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
42#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
43#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
44#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
45#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
46#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
47#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
48#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
49#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
50#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
51#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
52#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
53#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
54#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
55#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
56#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
57#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
58#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
59
60#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
61#define MII_BCM54XX_SHD_WRITE 0x8000
62#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
63#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
64
65/*
66 * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
67 */
68#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
69#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
70#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
71
72#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
73#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
74#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
75#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
76
77#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
78
79
80/* 28/*
81 * Broadcom LED source encodings. These are used in BCM5461, BCM5481, 29 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
82 * BCM5482, and possibly some others. 30 * BCM5482, and possibly some others.
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 98e7cbf720a5..6a999e6814a0 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/net_tstamp.h> 28#include <linux/net_tstamp.h>
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/if_vlan.h>
30#include <linux/phy.h> 31#include <linux/phy.h>
31#include <linux/ptp_classify.h> 32#include <linux/ptp_classify.h>
32#include <linux/ptp_clock_kernel.h> 33#include <linux/ptp_clock_kernel.h>
@@ -47,6 +48,7 @@
47#define CAL_EVENT 7 48#define CAL_EVENT 7
48#define CAL_TRIGGER 7 49#define CAL_TRIGGER 7
49#define PER_TRIGGER 6 50#define PER_TRIGGER 6
51#define DP83640_N_PINS 12
50 52
51#define MII_DP83640_MICR 0x11 53#define MII_DP83640_MICR 0x11
52#define MII_DP83640_MISR 0x12 54#define MII_DP83640_MISR 0x12
@@ -173,6 +175,37 @@ MODULE_PARM_DESC(chosen_phy, \
173MODULE_PARM_DESC(gpio_tab, \ 175MODULE_PARM_DESC(gpio_tab, \
174 "Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6"); 176 "Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
175 177
178static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
179{
180 int i, index;
181
182 for (i = 0; i < DP83640_N_PINS; i++) {
183 snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
184 pd[i].index = i;
185 }
186
187 for (i = 0; i < GPIO_TABLE_SIZE; i++) {
188 if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
189 pr_err("gpio_tab[%d]=%hu out of range", i, gpio_tab[i]);
190 return;
191 }
192 }
193
194 index = gpio_tab[CALIBRATE_GPIO] - 1;
195 pd[index].func = PTP_PF_PHYSYNC;
196 pd[index].chan = 0;
197
198 index = gpio_tab[PEROUT_GPIO] - 1;
199 pd[index].func = PTP_PF_PEROUT;
200 pd[index].chan = 0;
201
202 for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
203 index = gpio_tab[i] - 1;
204 pd[index].func = PTP_PF_EXTTS;
205 pd[index].chan = i - EXTTS0_GPIO;
206 }
207}
208
176/* a list of clocks and a mutex to protect it */ 209/* a list of clocks and a mutex to protect it */
177static LIST_HEAD(phyter_clocks); 210static LIST_HEAD(phyter_clocks);
178static DEFINE_MUTEX(phyter_clocks_lock); 211static DEFINE_MUTEX(phyter_clocks_lock);
@@ -266,15 +299,22 @@ static u64 phy2txts(struct phy_txts *p)
266 return ns; 299 return ns;
267} 300}
268 301
269static void periodic_output(struct dp83640_clock *clock, 302static int periodic_output(struct dp83640_clock *clock,
270 struct ptp_clock_request *clkreq, bool on) 303 struct ptp_clock_request *clkreq, bool on)
271{ 304{
272 struct dp83640_private *dp83640 = clock->chosen; 305 struct dp83640_private *dp83640 = clock->chosen;
273 struct phy_device *phydev = dp83640->phydev; 306 struct phy_device *phydev = dp83640->phydev;
274 u32 sec, nsec, period; 307 u32 sec, nsec, pwidth;
275 u16 gpio, ptp_trig, trigger, val; 308 u16 gpio, ptp_trig, trigger, val;
276 309
277 gpio = on ? gpio_tab[PEROUT_GPIO] : 0; 310 if (on) {
311 gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT, 0);
312 if (gpio < 1)
313 return -EINVAL;
314 } else {
315 gpio = 0;
316 }
317
278 trigger = PER_TRIGGER; 318 trigger = PER_TRIGGER;
279 319
280 ptp_trig = TRIG_WR | 320 ptp_trig = TRIG_WR |
@@ -291,13 +331,14 @@ static void periodic_output(struct dp83640_clock *clock,
291 ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig); 331 ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
292 ext_write(0, phydev, PAGE4, PTP_CTL, val); 332 ext_write(0, phydev, PAGE4, PTP_CTL, val);
293 mutex_unlock(&clock->extreg_lock); 333 mutex_unlock(&clock->extreg_lock);
294 return; 334 return 0;
295 } 335 }
296 336
297 sec = clkreq->perout.start.sec; 337 sec = clkreq->perout.start.sec;
298 nsec = clkreq->perout.start.nsec; 338 nsec = clkreq->perout.start.nsec;
299 period = clkreq->perout.period.sec * 1000000000UL; 339 pwidth = clkreq->perout.period.sec * 1000000000UL;
300 period += clkreq->perout.period.nsec; 340 pwidth += clkreq->perout.period.nsec;
341 pwidth /= 2;
301 342
302 mutex_lock(&clock->extreg_lock); 343 mutex_lock(&clock->extreg_lock);
303 344
@@ -310,8 +351,8 @@ static void periodic_output(struct dp83640_clock *clock,
310 ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16); /* ns[31:16] */ 351 ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16); /* ns[31:16] */
311 ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff); /* sec[15:0] */ 352 ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff); /* sec[15:0] */
312 ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16); /* sec[31:16] */ 353 ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16); /* sec[31:16] */
313 ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */ 354 ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
314 ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16); /* ns[31:16] */ 355 ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16); /* ns[31:16] */
315 356
316 /*enable trigger*/ 357 /*enable trigger*/
317 val &= ~TRIG_LOAD; 358 val &= ~TRIG_LOAD;
@@ -319,6 +360,7 @@ static void periodic_output(struct dp83640_clock *clock,
319 ext_write(0, phydev, PAGE4, PTP_CTL, val); 360 ext_write(0, phydev, PAGE4, PTP_CTL, val);
320 361
321 mutex_unlock(&clock->extreg_lock); 362 mutex_unlock(&clock->extreg_lock);
363 return 0;
322} 364}
323 365
324/* ptp clock methods */ 366/* ptp clock methods */
@@ -424,18 +466,21 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
424 struct dp83640_clock *clock = 466 struct dp83640_clock *clock =
425 container_of(ptp, struct dp83640_clock, caps); 467 container_of(ptp, struct dp83640_clock, caps);
426 struct phy_device *phydev = clock->chosen->phydev; 468 struct phy_device *phydev = clock->chosen->phydev;
427 int index; 469 unsigned int index;
428 u16 evnt, event_num, gpio_num; 470 u16 evnt, event_num, gpio_num;
429 471
430 switch (rq->type) { 472 switch (rq->type) {
431 case PTP_CLK_REQ_EXTTS: 473 case PTP_CLK_REQ_EXTTS:
432 index = rq->extts.index; 474 index = rq->extts.index;
433 if (index < 0 || index >= N_EXT_TS) 475 if (index >= N_EXT_TS)
434 return -EINVAL; 476 return -EINVAL;
435 event_num = EXT_EVENT + index; 477 event_num = EXT_EVENT + index;
436 evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT; 478 evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
437 if (on) { 479 if (on) {
438 gpio_num = gpio_tab[EXTTS0_GPIO + index]; 480 gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
481 PTP_PF_EXTTS, index);
482 if (gpio_num < 1)
483 return -EINVAL;
439 evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT; 484 evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
440 if (rq->extts.flags & PTP_FALLING_EDGE) 485 if (rq->extts.flags & PTP_FALLING_EDGE)
441 evnt |= EVNT_FALL; 486 evnt |= EVNT_FALL;
@@ -448,8 +493,7 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
448 case PTP_CLK_REQ_PEROUT: 493 case PTP_CLK_REQ_PEROUT:
449 if (rq->perout.index != 0) 494 if (rq->perout.index != 0)
450 return -EINVAL; 495 return -EINVAL;
451 periodic_output(clock, rq, on); 496 return periodic_output(clock, rq, on);
452 return 0;
453 497
454 default: 498 default:
455 break; 499 break;
@@ -458,6 +502,12 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
458 return -EOPNOTSUPP; 502 return -EOPNOTSUPP;
459} 503}
460 504
505static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
506 enum ptp_pin_function func, unsigned int chan)
507{
508 return 0;
509}
510
461static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 }; 511static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
462static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F }; 512static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
463 513
@@ -875,6 +925,7 @@ static void dp83640_free_clocks(void)
875 mutex_destroy(&clock->extreg_lock); 925 mutex_destroy(&clock->extreg_lock);
876 mutex_destroy(&clock->clock_lock); 926 mutex_destroy(&clock->clock_lock);
877 put_device(&clock->bus->dev); 927 put_device(&clock->bus->dev);
928 kfree(clock->caps.pin_config);
878 kfree(clock); 929 kfree(clock);
879 } 930 }
880 931
@@ -894,12 +945,18 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
894 clock->caps.n_alarm = 0; 945 clock->caps.n_alarm = 0;
895 clock->caps.n_ext_ts = N_EXT_TS; 946 clock->caps.n_ext_ts = N_EXT_TS;
896 clock->caps.n_per_out = 1; 947 clock->caps.n_per_out = 1;
948 clock->caps.n_pins = DP83640_N_PINS;
897 clock->caps.pps = 0; 949 clock->caps.pps = 0;
898 clock->caps.adjfreq = ptp_dp83640_adjfreq; 950 clock->caps.adjfreq = ptp_dp83640_adjfreq;
899 clock->caps.adjtime = ptp_dp83640_adjtime; 951 clock->caps.adjtime = ptp_dp83640_adjtime;
900 clock->caps.gettime = ptp_dp83640_gettime; 952 clock->caps.gettime = ptp_dp83640_gettime;
901 clock->caps.settime = ptp_dp83640_settime; 953 clock->caps.settime = ptp_dp83640_settime;
902 clock->caps.enable = ptp_dp83640_enable; 954 clock->caps.enable = ptp_dp83640_enable;
955 clock->caps.verify = ptp_dp83640_verify;
956 /*
957 * Convert the module param defaults into a dynamic pin configuration.
958 */
959 dp83640_gpio_defaults(clock->caps.pin_config);
903 /* 960 /*
904 * Get a reference to this bus instance. 961 * Get a reference to this bus instance.
905 */ 962 */
@@ -950,6 +1007,13 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
950 if (!clock) 1007 if (!clock)
951 goto out; 1008 goto out;
952 1009
1010 clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
1011 DP83640_N_PINS, GFP_KERNEL);
1012 if (!clock->caps.pin_config) {
1013 kfree(clock);
1014 clock = NULL;
1015 goto out;
1016 }
953 dp83640_clock_init(clock, bus); 1017 dp83640_clock_init(clock, bus);
954 list_add_tail(&phyter_clocks, &clock->list); 1018 list_add_tail(&phyter_clocks, &clock->list);
955out: 1019out:
@@ -1363,7 +1427,7 @@ static void __exit dp83640_exit(void)
1363} 1427}
1364 1428
1365MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver"); 1429MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
1366MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.at>"); 1430MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
1367MODULE_LICENSE("GPL"); 1431MODULE_LICENSE("GPL");
1368 1432
1369module_init(dp83640_init); 1433module_init(dp83640_init);
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 9367acc84fbb..15bc7f9ea224 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -90,11 +90,6 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
90 return 0; 90 return 0;
91} 91}
92 92
93static int sun4i_mdio_reset(struct mii_bus *bus)
94{
95 return 0;
96}
97
98static int sun4i_mdio_probe(struct platform_device *pdev) 93static int sun4i_mdio_probe(struct platform_device *pdev)
99{ 94{
100 struct device_node *np = pdev->dev.of_node; 95 struct device_node *np = pdev->dev.of_node;
@@ -110,7 +105,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
110 bus->name = "sun4i_mii_bus"; 105 bus->name = "sun4i_mii_bus";
111 bus->read = &sun4i_mdio_read; 106 bus->read = &sun4i_mdio_read;
112 bus->write = &sun4i_mdio_write; 107 bus->write = &sun4i_mdio_write;
113 bus->reset = &sun4i_mdio_reset;
114 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); 108 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
115 bus->parent = &pdev->dev; 109 bus->parent = &pdev->dev;
116 110
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 71e49000fbf3..76f54b32a120 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -432,8 +432,28 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
432} 432}
433static DEVICE_ATTR_RO(phy_id); 433static DEVICE_ATTR_RO(phy_id);
434 434
435static ssize_t
436phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
437{
438 struct phy_device *phydev = to_phy_device(dev);
439
440 return sprintf(buf, "%s\n", phy_modes(phydev->interface));
441}
442static DEVICE_ATTR_RO(phy_interface);
443
444static ssize_t
445phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf)
446{
447 struct phy_device *phydev = to_phy_device(dev);
448
449 return sprintf(buf, "%d\n", phydev->has_fixups);
450}
451static DEVICE_ATTR_RO(phy_has_fixups);
452
435static struct attribute *mdio_dev_attrs[] = { 453static struct attribute *mdio_dev_attrs[] = {
436 &dev_attr_phy_id.attr, 454 &dev_attr_phy_id.attr,
455 &dev_attr_phy_interface.attr,
456 &dev_attr_phy_has_fixups.attr,
437 NULL, 457 NULL,
438}; 458};
439ATTRIBUTE_GROUPS(mdio_dev); 459ATTRIBUTE_GROUPS(mdio_dev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5a8993b0cafc..5ad971a55c5d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -148,15 +148,52 @@ static int ks8737_config_intr(struct phy_device *phydev)
148 return rc < 0 ? rc : 0; 148 return rc < 0 ? rc : 0;
149} 149}
150 150
151static int kszphy_setup_led(struct phy_device *phydev,
152 unsigned int reg, unsigned int shift)
153{
154
155 struct device *dev = &phydev->dev;
156 struct device_node *of_node = dev->of_node;
157 int rc, temp;
158 u32 val;
159
160 if (!of_node && dev->parent->of_node)
161 of_node = dev->parent->of_node;
162
163 if (of_property_read_u32(of_node, "micrel,led-mode", &val))
164 return 0;
165
166 temp = phy_read(phydev, reg);
167 if (temp < 0)
168 return temp;
169
170 temp &= ~(3 << shift);
171 temp |= val << shift;
172 rc = phy_write(phydev, reg, temp);
173
174 return rc < 0 ? rc : 0;
175}
176
151static int kszphy_config_init(struct phy_device *phydev) 177static int kszphy_config_init(struct phy_device *phydev)
152{ 178{
153 return 0; 179 return 0;
154} 180}
155 181
/* KSZ8041 family: the single LED control field sits in register 0x1e,
 * bits 15..14.
 */
static int kszphy_config_init_led8041(struct phy_device *phydev)
{
	return kszphy_setup_led(phydev, 0x1e, 14);
}
187
156static int ksz8021_config_init(struct phy_device *phydev) 188static int ksz8021_config_init(struct phy_device *phydev)
157{ 189{
158 int rc;
159 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE; 190 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
191 int rc;
192
193 rc = kszphy_setup_led(phydev, 0x1f, 4);
194 if (rc)
195 dev_err(&phydev->dev, "failed to set led mode\n");
196
160 phy_write(phydev, MII_KSZPHY_OMSO, val); 197 phy_write(phydev, MII_KSZPHY_OMSO, val);
161 rc = ksz_config_flags(phydev); 198 rc = ksz_config_flags(phydev);
162 return rc < 0 ? rc : 0; 199 return rc < 0 ? rc : 0;
@@ -166,6 +203,10 @@ static int ks8051_config_init(struct phy_device *phydev)
166{ 203{
167 int rc; 204 int rc;
168 205
206 rc = kszphy_setup_led(phydev, 0x1f, 4);
207 if (rc)
208 dev_err(&phydev->dev, "failed to set led mode\n");
209
169 rc = ksz_config_flags(phydev); 210 rc = ksz_config_flags(phydev);
170 return rc < 0 ? rc : 0; 211 return rc < 0 ? rc : 0;
171} 212}
@@ -327,7 +368,7 @@ static struct phy_driver ksphy_driver[] = {
327 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause 368 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
328 | SUPPORTED_Asym_Pause), 369 | SUPPORTED_Asym_Pause),
329 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 370 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
330 .config_init = kszphy_config_init, 371 .config_init = kszphy_config_init_led8041,
331 .config_aneg = genphy_config_aneg, 372 .config_aneg = genphy_config_aneg,
332 .read_status = genphy_read_status, 373 .read_status = genphy_read_status,
333 .ack_interrupt = kszphy_ack_interrupt, 374 .ack_interrupt = kszphy_ack_interrupt,
@@ -342,7 +383,7 @@ static struct phy_driver ksphy_driver[] = {
342 .features = PHY_BASIC_FEATURES | 383 .features = PHY_BASIC_FEATURES |
343 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 384 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
344 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 385 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
345 .config_init = kszphy_config_init, 386 .config_init = kszphy_config_init_led8041,
346 .config_aneg = genphy_config_aneg, 387 .config_aneg = genphy_config_aneg,
347 .read_status = genphy_read_status, 388 .read_status = genphy_read_status,
348 .ack_interrupt = kszphy_ack_interrupt, 389 .ack_interrupt = kszphy_ack_interrupt,
@@ -371,7 +412,7 @@ static struct phy_driver ksphy_driver[] = {
371 .phy_id_mask = 0x00ffffff, 412 .phy_id_mask = 0x00ffffff,
372 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), 413 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
373 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 414 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
374 .config_init = kszphy_config_init, 415 .config_init = kszphy_config_init_led8041,
375 .config_aneg = genphy_config_aneg, 416 .config_aneg = genphy_config_aneg,
376 .read_status = genphy_read_status, 417 .read_status = genphy_read_status,
377 .ack_interrupt = kszphy_ack_interrupt, 418 .ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 76d96b9ebcdb..1d788f19135b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -38,6 +38,26 @@
38 38
39#include <asm/irq.h> 39#include <asm/irq.h>
40 40
41static const char *phy_speed_to_str(int speed)
42{
43 switch (speed) {
44 case SPEED_10:
45 return "10Mbps";
46 case SPEED_100:
47 return "100Mbps";
48 case SPEED_1000:
49 return "1Gbps";
50 case SPEED_2500:
51 return "2.5Gbps";
52 case SPEED_10000:
53 return "10Gbps";
54 case SPEED_UNKNOWN:
55 return "Unknown";
56 default:
57 return "Unsupported (update phy.c)";
58 }
59}
60
41/** 61/**
42 * phy_print_status - Convenience function to print out the current phy status 62 * phy_print_status - Convenience function to print out the current phy status
43 * @phydev: the phy_device struct 63 * @phydev: the phy_device struct
@@ -45,12 +65,13 @@
45void phy_print_status(struct phy_device *phydev) 65void phy_print_status(struct phy_device *phydev)
46{ 66{
47 if (phydev->link) { 67 if (phydev->link) {
48 pr_info("%s - Link is Up - %d/%s\n", 68 netdev_info(phydev->attached_dev,
49 dev_name(&phydev->dev), 69 "Link is Up - %s/%s - flow control %s\n",
50 phydev->speed, 70 phy_speed_to_str(phydev->speed),
51 DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); 71 DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
72 phydev->pause ? "rx/tx" : "off");
52 } else { 73 } else {
53 pr_info("%s - Link is Down\n", dev_name(&phydev->dev)); 74 netdev_info(phydev->attached_dev, "Link is Down\n");
54 } 75 }
55} 76}
56EXPORT_SYMBOL(phy_print_status); 77EXPORT_SYMBOL(phy_print_status);
@@ -62,7 +83,7 @@ EXPORT_SYMBOL(phy_print_status);
62 * If the @phydev driver has an ack_interrupt function, call it to 83 * If the @phydev driver has an ack_interrupt function, call it to
63 * ack and clear the phy device's interrupt. 84 * ack and clear the phy device's interrupt.
64 * 85 *
65 * Returns 0 on success on < 0 on error. 86 * Returns 0 on success or < 0 on error.
66 */ 87 */
67static int phy_clear_interrupt(struct phy_device *phydev) 88static int phy_clear_interrupt(struct phy_device *phydev)
68{ 89{
@@ -77,7 +98,7 @@ static int phy_clear_interrupt(struct phy_device *phydev)
77 * @phydev: the phy_device struct 98 * @phydev: the phy_device struct
78 * @interrupts: interrupt flags to configure for this @phydev 99 * @interrupts: interrupt flags to configure for this @phydev
79 * 100 *
80 * Returns 0 on success on < 0 on error. 101 * Returns 0 on success or < 0 on error.
81 */ 102 */
82static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) 103static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
83{ 104{
@@ -93,15 +114,16 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
93 * phy_aneg_done - return auto-negotiation status 114 * phy_aneg_done - return auto-negotiation status
94 * @phydev: target phy_device struct 115 * @phydev: target phy_device struct
95 * 116 *
96 * Description: Reads the status register and returns 0 either if 117 * Description: Return the auto-negotiation status from this @phydev
97 * auto-negotiation is incomplete, or if there was an error. 118 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
98 * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done. 119 * is still pending.
99 */ 120 */
100static inline int phy_aneg_done(struct phy_device *phydev) 121static inline int phy_aneg_done(struct phy_device *phydev)
101{ 122{
102 int retval = phy_read(phydev, MII_BMSR); 123 if (phydev->drv->aneg_done)
124 return phydev->drv->aneg_done(phydev);
103 125
104 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE); 126 return genphy_aneg_done(phydev);
105} 127}
106 128
107/* A structure for mapping a particular speed and duplex 129/* A structure for mapping a particular speed and duplex
@@ -283,7 +305,10 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
283 305
284 ethtool_cmd_speed_set(cmd, phydev->speed); 306 ethtool_cmd_speed_set(cmd, phydev->speed);
285 cmd->duplex = phydev->duplex; 307 cmd->duplex = phydev->duplex;
286 cmd->port = PORT_MII; 308 if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
309 cmd->port = PORT_BNC;
310 else
311 cmd->port = PORT_MII;
287 cmd->phy_address = phydev->addr; 312 cmd->phy_address = phydev->addr;
288 cmd->transceiver = phy_is_internal(phydev) ? 313 cmd->transceiver = phy_is_internal(phydev) ?
289 XCVR_INTERNAL : XCVR_EXTERNAL; 314 XCVR_INTERNAL : XCVR_EXTERNAL;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 2f6989b1e0dc..0ce606624296 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -139,6 +139,7 @@ static int phy_scan_fixups(struct phy_device *phydev)
139 mutex_unlock(&phy_fixup_lock); 139 mutex_unlock(&phy_fixup_lock);
140 return err; 140 return err;
141 } 141 }
142 phydev->has_fixups = true;
142 } 143 }
143 } 144 }
144 mutex_unlock(&phy_fixup_lock); 145 mutex_unlock(&phy_fixup_lock);
@@ -534,16 +535,16 @@ static int phy_poll_reset(struct phy_device *phydev)
534 535
535int phy_init_hw(struct phy_device *phydev) 536int phy_init_hw(struct phy_device *phydev)
536{ 537{
537 int ret; 538 int ret = 0;
538 539
539 if (!phydev->drv || !phydev->drv->config_init) 540 if (!phydev->drv || !phydev->drv->config_init)
540 return 0; 541 return 0;
541 542
542 ret = phy_write(phydev, MII_BMCR, BMCR_RESET); 543 if (phydev->drv->soft_reset)
543 if (ret < 0) 544 ret = phydev->drv->soft_reset(phydev);
544 return ret; 545 else
546 ret = genphy_soft_reset(phydev);
545 547
546 ret = phy_poll_reset(phydev);
547 if (ret < 0) 548 if (ret < 0)
548 return ret; 549 return ret;
549 550
@@ -864,6 +865,22 @@ int genphy_config_aneg(struct phy_device *phydev)
864} 865}
865EXPORT_SYMBOL(genphy_config_aneg); 866EXPORT_SYMBOL(genphy_config_aneg);
866 867
868/**
869 * genphy_aneg_done - return auto-negotiation status
870 * @phydev: target phy_device struct
871 *
872 * Description: Reads the status register and returns 0 either if
873 * auto-negotiation is incomplete, or if there was an error.
874 * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
875 */
876int genphy_aneg_done(struct phy_device *phydev)
877{
878 int retval = phy_read(phydev, MII_BMSR);
879
880 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
881}
882EXPORT_SYMBOL(genphy_aneg_done);
883
867static int gen10g_config_aneg(struct phy_device *phydev) 884static int gen10g_config_aneg(struct phy_device *phydev)
868{ 885{
869 return 0; 886 return 0;
@@ -1029,6 +1046,27 @@ static int gen10g_read_status(struct phy_device *phydev)
1029 return 0; 1046 return 0;
1030} 1047}
1031 1048
1049/**
1050 * genphy_soft_reset - software reset the PHY via BMCR_RESET bit
1051 * @phydev: target phy_device struct
1052 *
1053 * Description: Perform a software PHY reset using the standard
1054 * BMCR_RESET bit and poll for the reset bit to be cleared.
1055 *
1056 * Returns: 0 on success, < 0 on failure
1057 */
1058int genphy_soft_reset(struct phy_device *phydev)
1059{
1060 int ret;
1061
1062 ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
1063 if (ret < 0)
1064 return ret;
1065
1066 return phy_poll_reset(phydev);
1067}
1068EXPORT_SYMBOL(genphy_soft_reset);
1069
1032static int genphy_config_init(struct phy_device *phydev) 1070static int genphy_config_init(struct phy_device *phydev)
1033{ 1071{
1034 int val; 1072 int val;
@@ -1075,6 +1113,12 @@ static int genphy_config_init(struct phy_device *phydev)
1075 return 0; 1113 return 0;
1076} 1114}
1077 1115
/* 10G PHYs have no generic BMCR-style reset yet; treat it as a no-op. */
static int gen10g_soft_reset(struct phy_device *phydev)
{
	return 0;
}
1121
1078static int gen10g_config_init(struct phy_device *phydev) 1122static int gen10g_config_init(struct phy_device *phydev)
1079{ 1123{
1080 /* Temporarily just say we support everything */ 1124 /* Temporarily just say we support everything */
@@ -1249,9 +1293,11 @@ static struct phy_driver genphy_driver[] = {
1249 .phy_id = 0xffffffff, 1293 .phy_id = 0xffffffff,
1250 .phy_id_mask = 0xffffffff, 1294 .phy_id_mask = 0xffffffff,
1251 .name = "Generic PHY", 1295 .name = "Generic PHY",
1296 .soft_reset = genphy_soft_reset,
1252 .config_init = genphy_config_init, 1297 .config_init = genphy_config_init,
1253 .features = 0, 1298 .features = 0,
1254 .config_aneg = genphy_config_aneg, 1299 .config_aneg = genphy_config_aneg,
1300 .aneg_done = genphy_aneg_done,
1255 .read_status = genphy_read_status, 1301 .read_status = genphy_read_status,
1256 .suspend = genphy_suspend, 1302 .suspend = genphy_suspend,
1257 .resume = genphy_resume, 1303 .resume = genphy_resume,
@@ -1260,6 +1306,7 @@ static struct phy_driver genphy_driver[] = {
1260 .phy_id = 0xffffffff, 1306 .phy_id = 0xffffffff,
1261 .phy_id_mask = 0xffffffff, 1307 .phy_id_mask = 0xffffffff,
1262 .name = "Generic 10G PHY", 1308 .name = "Generic 10G PHY",
1309 .soft_reset = gen10g_soft_reset,
1263 .config_init = gen10g_config_init, 1310 .config_init = gen10g_config_init,
1264 .features = 0, 1311 .features = 0,
1265 .config_aneg = gen10g_config_aneg, 1312 .config_aneg = gen10g_config_aneg,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 72ff14b811c6..e3923ebb693f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -143,9 +143,8 @@ struct ppp {
143 struct sk_buff_head mrq; /* MP: receive reconstruction queue */ 143 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
144#endif /* CONFIG_PPP_MULTILINK */ 144#endif /* CONFIG_PPP_MULTILINK */
145#ifdef CONFIG_PPP_FILTER 145#ifdef CONFIG_PPP_FILTER
146 struct sock_filter *pass_filter; /* filter for packets to pass */ 146 struct sk_filter *pass_filter; /* filter for packets to pass */
147 struct sock_filter *active_filter;/* filter for pkts to reset idle */ 147 struct sk_filter *active_filter;/* filter for pkts to reset idle */
148 unsigned pass_len, active_len;
149#endif /* CONFIG_PPP_FILTER */ 148#endif /* CONFIG_PPP_FILTER */
150 struct net *ppp_net; /* the net we belong to */ 149 struct net *ppp_net; /* the net we belong to */
151 struct ppp_link_stats stats64; /* 64 bit network stats */ 150 struct ppp_link_stats stats64; /* 64 bit network stats */
@@ -755,28 +754,42 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
755 case PPPIOCSPASS: 754 case PPPIOCSPASS:
756 { 755 {
757 struct sock_filter *code; 756 struct sock_filter *code;
757
758 err = get_filter(argp, &code); 758 err = get_filter(argp, &code);
759 if (err >= 0) { 759 if (err >= 0) {
760 struct sock_fprog fprog = {
761 .len = err,
762 .filter = code,
763 };
764
760 ppp_lock(ppp); 765 ppp_lock(ppp);
761 kfree(ppp->pass_filter); 766 if (ppp->pass_filter)
762 ppp->pass_filter = code; 767 sk_unattached_filter_destroy(ppp->pass_filter);
763 ppp->pass_len = err; 768 err = sk_unattached_filter_create(&ppp->pass_filter,
769 &fprog);
770 kfree(code);
764 ppp_unlock(ppp); 771 ppp_unlock(ppp);
765 err = 0;
766 } 772 }
767 break; 773 break;
768 } 774 }
769 case PPPIOCSACTIVE: 775 case PPPIOCSACTIVE:
770 { 776 {
771 struct sock_filter *code; 777 struct sock_filter *code;
778
772 err = get_filter(argp, &code); 779 err = get_filter(argp, &code);
773 if (err >= 0) { 780 if (err >= 0) {
781 struct sock_fprog fprog = {
782 .len = err,
783 .filter = code,
784 };
785
774 ppp_lock(ppp); 786 ppp_lock(ppp);
775 kfree(ppp->active_filter); 787 if (ppp->active_filter)
776 ppp->active_filter = code; 788 sk_unattached_filter_destroy(ppp->active_filter);
777 ppp->active_len = err; 789 err = sk_unattached_filter_create(&ppp->active_filter,
790 &fprog);
791 kfree(code);
778 ppp_unlock(ppp); 792 ppp_unlock(ppp);
779 err = 0;
780 } 793 }
781 break; 794 break;
782 } 795 }
@@ -1184,7 +1197,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1184 a four-byte PPP header on each packet */ 1197 a four-byte PPP header on each packet */
1185 *skb_push(skb, 2) = 1; 1198 *skb_push(skb, 2) = 1;
1186 if (ppp->pass_filter && 1199 if (ppp->pass_filter &&
1187 sk_run_filter(skb, ppp->pass_filter) == 0) { 1200 SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
1188 if (ppp->debug & 1) 1201 if (ppp->debug & 1)
1189 netdev_printk(KERN_DEBUG, ppp->dev, 1202 netdev_printk(KERN_DEBUG, ppp->dev,
1190 "PPP: outbound frame " 1203 "PPP: outbound frame "
@@ -1194,7 +1207,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1194 } 1207 }
1195 /* if this packet passes the active filter, record the time */ 1208 /* if this packet passes the active filter, record the time */
1196 if (!(ppp->active_filter && 1209 if (!(ppp->active_filter &&
1197 sk_run_filter(skb, ppp->active_filter) == 0)) 1210 SK_RUN_FILTER(ppp->active_filter, skb) == 0))
1198 ppp->last_xmit = jiffies; 1211 ppp->last_xmit = jiffies;
1199 skb_pull(skb, 2); 1212 skb_pull(skb, 2);
1200#else 1213#else
@@ -1818,7 +1831,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1818 1831
1819 *skb_push(skb, 2) = 0; 1832 *skb_push(skb, 2) = 0;
1820 if (ppp->pass_filter && 1833 if (ppp->pass_filter &&
1821 sk_run_filter(skb, ppp->pass_filter) == 0) { 1834 SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
1822 if (ppp->debug & 1) 1835 if (ppp->debug & 1)
1823 netdev_printk(KERN_DEBUG, ppp->dev, 1836 netdev_printk(KERN_DEBUG, ppp->dev,
1824 "PPP: inbound frame " 1837 "PPP: inbound frame "
@@ -1827,7 +1840,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1827 return; 1840 return;
1828 } 1841 }
1829 if (!(ppp->active_filter && 1842 if (!(ppp->active_filter &&
1830 sk_run_filter(skb, ppp->active_filter) == 0)) 1843 SK_RUN_FILTER(ppp->active_filter, skb) == 0))
1831 ppp->last_recv = jiffies; 1844 ppp->last_recv = jiffies;
1832 __skb_pull(skb, 2); 1845 __skb_pull(skb, 2);
1833 } else 1846 } else
@@ -2672,6 +2685,10 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2672 ppp->minseq = -1; 2685 ppp->minseq = -1;
2673 skb_queue_head_init(&ppp->mrq); 2686 skb_queue_head_init(&ppp->mrq);
2674#endif /* CONFIG_PPP_MULTILINK */ 2687#endif /* CONFIG_PPP_MULTILINK */
2688#ifdef CONFIG_PPP_FILTER
2689 ppp->pass_filter = NULL;
2690 ppp->active_filter = NULL;
2691#endif /* CONFIG_PPP_FILTER */
2675 2692
2676 /* 2693 /*
2677 * drum roll: don't forget to set 2694 * drum roll: don't forget to set
@@ -2802,10 +2819,15 @@ static void ppp_destroy_interface(struct ppp *ppp)
2802 skb_queue_purge(&ppp->mrq); 2819 skb_queue_purge(&ppp->mrq);
2803#endif /* CONFIG_PPP_MULTILINK */ 2820#endif /* CONFIG_PPP_MULTILINK */
2804#ifdef CONFIG_PPP_FILTER 2821#ifdef CONFIG_PPP_FILTER
2805 kfree(ppp->pass_filter); 2822 if (ppp->pass_filter) {
2806 ppp->pass_filter = NULL; 2823 sk_unattached_filter_destroy(ppp->pass_filter);
2807 kfree(ppp->active_filter); 2824 ppp->pass_filter = NULL;
2808 ppp->active_filter = NULL; 2825 }
2826
2827 if (ppp->active_filter) {
2828 sk_unattached_filter_destroy(ppp->active_filter);
2829 ppp->active_filter = NULL;
2830 }
2809#endif /* CONFIG_PPP_FILTER */ 2831#endif /* CONFIG_PPP_FILTER */
2810 2832
2811 kfree_skb(ppp->xmit_pending); 2833 kfree_skb(ppp->xmit_pending);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c8624a8235ab..33008c1d1d67 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)
1031} 1031}
1032 1032
1033#ifdef CONFIG_NET_POLL_CONTROLLER 1033#ifdef CONFIG_NET_POLL_CONTROLLER
1034static int team_port_enable_netpoll(struct team *team, struct team_port *port, 1034static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1035 gfp_t gfp)
1036{ 1035{
1037 struct netpoll *np; 1036 struct netpoll *np;
1038 int err; 1037 int err;
@@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
1040 if (!team->dev->npinfo) 1039 if (!team->dev->npinfo)
1041 return 0; 1040 return 0;
1042 1041
1043 np = kzalloc(sizeof(*np), gfp); 1042 np = kzalloc(sizeof(*np), GFP_KERNEL);
1044 if (!np) 1043 if (!np)
1045 return -ENOMEM; 1044 return -ENOMEM;
1046 1045
1047 err = __netpoll_setup(np, port->dev, gfp); 1046 err = __netpoll_setup(np, port->dev);
1048 if (err) { 1047 if (err) {
1049 kfree(np); 1048 kfree(np);
1050 return err; 1049 return err;
@@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1067 kfree(np); 1066 kfree(np);
1068} 1067}
1069#else 1068#else
1070static int team_port_enable_netpoll(struct team *team, struct team_port *port, 1069static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1071 gfp_t gfp)
1072{ 1070{
1073 return 0; 1071 return 0;
1074} 1072}
@@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1156 goto err_vids_add; 1154 goto err_vids_add;
1157 } 1155 }
1158 1156
1159 err = team_port_enable_netpoll(team, port, GFP_KERNEL); 1157 err = team_port_enable_netpoll(team, port);
1160 if (err) { 1158 if (err) {
1161 netdev_err(dev, "Failed to enable netpoll on device %s\n", 1159 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1162 portname); 1160 portname);
@@ -1540,16 +1538,10 @@ static int team_init(struct net_device *dev)
1540 mutex_init(&team->lock); 1538 mutex_init(&team->lock);
1541 team_set_no_mode(team); 1539 team_set_no_mode(team);
1542 1540
1543 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); 1541 team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1544 if (!team->pcpu_stats) 1542 if (!team->pcpu_stats)
1545 return -ENOMEM; 1543 return -ENOMEM;
1546 1544
1547 for_each_possible_cpu(i) {
1548 struct team_pcpu_stats *team_stats;
1549 team_stats = per_cpu_ptr(team->pcpu_stats, i);
1550 u64_stats_init(&team_stats->syncp);
1551 }
1552
1553 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1545 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1554 INIT_HLIST_HEAD(&team->en_port_hlist[i]); 1546 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1555 INIT_LIST_HEAD(&team->port_list); 1547 INIT_LIST_HEAD(&team->port_list);
@@ -1767,13 +1759,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1767 for_each_possible_cpu(i) { 1759 for_each_possible_cpu(i) {
1768 p = per_cpu_ptr(team->pcpu_stats, i); 1760 p = per_cpu_ptr(team->pcpu_stats, i);
1769 do { 1761 do {
1770 start = u64_stats_fetch_begin_bh(&p->syncp); 1762 start = u64_stats_fetch_begin_irq(&p->syncp);
1771 rx_packets = p->rx_packets; 1763 rx_packets = p->rx_packets;
1772 rx_bytes = p->rx_bytes; 1764 rx_bytes = p->rx_bytes;
1773 rx_multicast = p->rx_multicast; 1765 rx_multicast = p->rx_multicast;
1774 tx_packets = p->tx_packets; 1766 tx_packets = p->tx_packets;
1775 tx_bytes = p->tx_bytes; 1767 tx_bytes = p->tx_bytes;
1776 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 1768 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1777 1769
1778 stats->rx_packets += rx_packets; 1770 stats->rx_packets += rx_packets;
1779 stats->rx_bytes += rx_bytes; 1771 stats->rx_bytes += rx_bytes;
@@ -1856,7 +1848,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
1856} 1848}
1857 1849
1858static int team_netpoll_setup(struct net_device *dev, 1850static int team_netpoll_setup(struct net_device *dev,
1859 struct netpoll_info *npifo, gfp_t gfp) 1851 struct netpoll_info *npifo)
1860{ 1852{
1861 struct team *team = netdev_priv(dev); 1853 struct team *team = netdev_priv(dev);
1862 struct team_port *port; 1854 struct team_port *port;
@@ -1864,7 +1856,7 @@ static int team_netpoll_setup(struct net_device *dev,
1864 1856
1865 mutex_lock(&team->lock); 1857 mutex_lock(&team->lock);
1866 list_for_each_entry(port, &team->port_list, list) { 1858 list_for_each_entry(port, &team->port_list, list) {
1867 err = team_port_enable_netpoll(team, port, gfp); 1859 err = team_port_enable_netpoll(team, port);
1868 if (err) { 1860 if (err) {
1869 __team_netpoll_cleanup(team); 1861 __team_netpoll_cleanup(team);
1870 break; 1862 break;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index d671fc3ac5ac..dbde3412ee5e 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -432,9 +432,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
432 struct lb_stats tmp; 432 struct lb_stats tmp;
433 433
434 do { 434 do {
435 start = u64_stats_fetch_begin_bh(syncp); 435 start = u64_stats_fetch_begin_irq(syncp);
436 tmp.tx_bytes = cpu_stats->tx_bytes; 436 tmp.tx_bytes = cpu_stats->tx_bytes;
437 } while (u64_stats_fetch_retry_bh(syncp, start)); 437 } while (u64_stats_fetch_retry_irq(syncp, start));
438 acc_stats->tx_bytes += tmp.tx_bytes; 438 acc_stats->tx_bytes += tmp.tx_bytes;
439} 439}
440 440
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 26f8635b027d..ee328ba101e7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -452,7 +452,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
452 452
453 --tun->numqueues; 453 --tun->numqueues;
454 if (clean) { 454 if (clean) {
455 rcu_assign_pointer(tfile->tun, NULL); 455 RCU_INIT_POINTER(tfile->tun, NULL);
456 sock_put(&tfile->sk); 456 sock_put(&tfile->sk);
457 } else 457 } else
458 tun_disable_queue(tun, tfile); 458 tun_disable_queue(tun, tfile);
@@ -499,12 +499,12 @@ static void tun_detach_all(struct net_device *dev)
499 tfile = rtnl_dereference(tun->tfiles[i]); 499 tfile = rtnl_dereference(tun->tfiles[i]);
500 BUG_ON(!tfile); 500 BUG_ON(!tfile);
501 wake_up_all(&tfile->wq.wait); 501 wake_up_all(&tfile->wq.wait);
502 rcu_assign_pointer(tfile->tun, NULL); 502 RCU_INIT_POINTER(tfile->tun, NULL);
503 --tun->numqueues; 503 --tun->numqueues;
504 } 504 }
505 list_for_each_entry(tfile, &tun->disabled, next) { 505 list_for_each_entry(tfile, &tun->disabled, next) {
506 wake_up_all(&tfile->wq.wait); 506 wake_up_all(&tfile->wq.wait);
507 rcu_assign_pointer(tfile->tun, NULL); 507 RCU_INIT_POINTER(tfile->tun, NULL);
508 } 508 }
509 BUG_ON(tun->numqueues != 0); 509 BUG_ON(tun->numqueues != 0);
510 510
@@ -2194,7 +2194,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2194 &tun_proto); 2194 &tun_proto);
2195 if (!tfile) 2195 if (!tfile)
2196 return -ENOMEM; 2196 return -ENOMEM;
2197 rcu_assign_pointer(tfile->tun, NULL); 2197 RCU_INIT_POINTER(tfile->tun, NULL);
2198 tfile->net = get_net(current->nsproxy->net_ns); 2198 tfile->net = get_net(current->nsproxy->net_ns);
2199 tfile->flags = 0; 2199 tfile->flags = 0;
2200 tfile->ifindex = 0; 2200 tfile->ifindex = 0;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index bd363b27e854..9ea4bfe5d318 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -625,6 +625,13 @@ static const struct usb_device_id products[] = {
625 .driver_info = 0, 625 .driver_info = 0,
626}, 626},
627 627
628/* Novatel Expedite E371 - handled by qmi_wwan */
629{
630 USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9011, USB_CLASS_COMM,
631 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
632 .driver_info = 0,
633},
634
628/* AnyDATA ADU960S - handled by qmi_wwan */ 635/* AnyDATA ADU960S - handled by qmi_wwan */
629{ 636{
630 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, 637 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d350d2795e10..549dbac710ed 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -73,6 +73,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
73 u8 iface_no; 73 u8 iface_no;
74 int err; 74 int err;
75 int eth_hlen; 75 int eth_hlen;
76 u16 mbim_mtu;
76 u16 ntb_fmt_supported; 77 u16 ntb_fmt_supported;
77 __le16 max_datagram_size; 78 __le16 max_datagram_size;
78 79
@@ -252,6 +253,14 @@ out:
252 /* set MTU to max supported by the device if necessary */ 253 /* set MTU to max supported by the device if necessary */
253 if (dev->net->mtu > ctx->max_datagram_size - eth_hlen) 254 if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
254 dev->net->mtu = ctx->max_datagram_size - eth_hlen; 255 dev->net->mtu = ctx->max_datagram_size - eth_hlen;
256
257 /* do not exceed operater preferred MTU */
258 if (ctx->mbim_extended_desc) {
259 mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
260 if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
261 dev->net->mtu = mbim_mtu;
262 }
263
255 return 0; 264 return 0;
256} 265}
257 266
@@ -390,6 +399,14 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
390 ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf; 399 ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
391 break; 400 break;
392 401
402 case USB_CDC_MBIM_EXTENDED_TYPE:
403 if (buf[0] < sizeof(*(ctx->mbim_extended_desc)))
404 break;
405
406 ctx->mbim_extended_desc =
407 (const struct usb_cdc_mbim_extended_desc *)buf;
408 break;
409
393 default: 410 default:
394 break; 411 break;
395 } 412 }
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index acfcc32b323d..8f37efd2d2fb 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -210,7 +210,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
210 * (0x86dd) so Linux can understand it. 210 * (0x86dd) so Linux can understand it.
211 */ 211 */
212 if ((buf->data[sizeof(*ethhdr)] & 0xf0) == 0x60) 212 if ((buf->data[sizeof(*ethhdr)] & 0xf0) == 0x60)
213 ethhdr->h_proto = __constant_htons(ETH_P_IPV6); 213 ethhdr->h_proto = htons(ETH_P_IPV6);
214 } 214 }
215 215
216 if (count) { 216 if (count) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 313cb6cd4848..e3458e3c44f1 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -500,6 +500,13 @@ static const struct usb_device_id products[] = {
500 USB_CDC_PROTO_NONE), 500 USB_CDC_PROTO_NONE),
501 .driver_info = (unsigned long)&qmi_wwan_info, 501 .driver_info = (unsigned long)&qmi_wwan_info,
502 }, 502 },
503 { /* Novatel Expedite E371 */
504 USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9011,
505 USB_CLASS_COMM,
506 USB_CDC_SUBCLASS_ETHERNET,
507 USB_CDC_PROTO_NONE),
508 .driver_info = (unsigned long)&qmi_wwan_info,
509 },
503 { /* Dell Wireless 5800 (Novatel E362) */ 510 { /* Dell Wireless 5800 (Novatel E362) */
504 USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195, 511 USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
505 USB_CLASS_COMM, 512 USB_CLASS_COMM,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index adb12f349a61..18e12a3f7fc3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -21,9 +21,10 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/ip.h> 22#include <linux/ip.h>
23#include <linux/ipv6.h> 23#include <linux/ipv6.h>
24#include <net/ip6_checksum.h>
24 25
25/* Version Information */ 26/* Version Information */
26#define DRIVER_VERSION "v1.04.0 (2014/01/15)" 27#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
27#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 28#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
28#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 29#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
29#define MODULENAME "r8152" 30#define MODULENAME "r8152"
@@ -59,9 +60,11 @@
59#define PLA_TCR0 0xe610 60#define PLA_TCR0 0xe610
60#define PLA_TCR1 0xe612 61#define PLA_TCR1 0xe612
61#define PLA_TXFIFO_CTRL 0xe618 62#define PLA_TXFIFO_CTRL 0xe618
62#define PLA_RSTTELLY 0xe800 63#define PLA_RSTTALLY 0xe800
63#define PLA_CR 0xe813 64#define PLA_CR 0xe813
64#define PLA_CRWECR 0xe81c 65#define PLA_CRWECR 0xe81c
66#define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */
67#define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */
65#define PLA_CONFIG5 0xe822 68#define PLA_CONFIG5 0xe822
66#define PLA_PHY_PWR 0xe84c 69#define PLA_PHY_PWR 0xe84c
67#define PLA_OOB_CTRL 0xe84f 70#define PLA_OOB_CTRL 0xe84f
@@ -69,7 +72,7 @@
69#define PLA_MISC_0 0xe858 72#define PLA_MISC_0 0xe858
70#define PLA_MISC_1 0xe85a 73#define PLA_MISC_1 0xe85a
71#define PLA_OCP_GPHY_BASE 0xe86c 74#define PLA_OCP_GPHY_BASE 0xe86c
72#define PLA_TELLYCNT 0xe890 75#define PLA_TALLYCNT 0xe890
73#define PLA_SFF_STS_7 0xe8de 76#define PLA_SFF_STS_7 0xe8de
74#define PLA_PHYSTATUS 0xe908 77#define PLA_PHYSTATUS 0xe908
75#define PLA_BP_BA 0xfc26 78#define PLA_BP_BA 0xfc26
@@ -177,6 +180,9 @@
177/* PLA_TCR1 */ 180/* PLA_TCR1 */
178#define VERSION_MASK 0x7cf0 181#define VERSION_MASK 0x7cf0
179 182
183/* PLA_RSTTALLY */
184#define TALLY_RESET 0x0001
185
180/* PLA_CR */ 186/* PLA_CR */
181#define CR_RST 0x10 187#define CR_RST 0x10
182#define CR_RE 0x08 188#define CR_RE 0x08
@@ -216,7 +222,14 @@
216/* PAL_BDC_CR */ 222/* PAL_BDC_CR */
217#define ALDPS_PROXY_MODE 0x0001 223#define ALDPS_PROXY_MODE 0x0001
218 224
225/* PLA_CONFIG34 */
226#define LINK_ON_WAKE_EN 0x0010
227#define LINK_OFF_WAKE_EN 0x0008
228
219/* PLA_CONFIG5 */ 229/* PLA_CONFIG5 */
230#define BWF_EN 0x0040
231#define MWF_EN 0x0020
232#define UWF_EN 0x0010
220#define LAN_WAKE_EN 0x0002 233#define LAN_WAKE_EN 0x0002
221 234
222/* PLA_LED_FEATURE */ 235/* PLA_LED_FEATURE */
@@ -436,6 +449,9 @@ enum rtl8152_flags {
436 RTL8152_SET_RX_MODE, 449 RTL8152_SET_RX_MODE,
437 WORK_ENABLE, 450 WORK_ENABLE,
438 RTL8152_LINK_CHG, 451 RTL8152_LINK_CHG,
452 SELECTIVE_SUSPEND,
453 PHY_RESET,
454 SCHEDULE_TASKLET,
439}; 455};
440 456
441/* Define these values to match your device */ 457/* Define these values to match your device */
@@ -449,11 +465,40 @@ enum rtl8152_flags {
449#define MCU_TYPE_PLA 0x0100 465#define MCU_TYPE_PLA 0x0100
450#define MCU_TYPE_USB 0x0000 466#define MCU_TYPE_USB 0x0000
451 467
468#define REALTEK_USB_DEVICE(vend, prod) \
469 USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
470
471struct tally_counter {
472 __le64 tx_packets;
473 __le64 rx_packets;
474 __le64 tx_errors;
475 __le32 rx_errors;
476 __le16 rx_missed;
477 __le16 align_errors;
478 __le32 tx_one_collision;
479 __le32 tx_multi_collision;
480 __le64 rx_unicast;
481 __le64 rx_broadcast;
482 __le32 rx_multicast;
483 __le16 tx_aborted;
484 __le16 tx_underun;
485};
486
452struct rx_desc { 487struct rx_desc {
453 __le32 opts1; 488 __le32 opts1;
454#define RX_LEN_MASK 0x7fff 489#define RX_LEN_MASK 0x7fff
490
455 __le32 opts2; 491 __le32 opts2;
492#define RD_UDP_CS (1 << 23)
493#define RD_TCP_CS (1 << 22)
494#define RD_IPV6_CS (1 << 20)
495#define RD_IPV4_CS (1 << 19)
496
456 __le32 opts3; 497 __le32 opts3;
498#define IPF (1 << 23) /* IP checksum fail */
499#define UDPF (1 << 22) /* UDP checksum fail */
500#define TCPF (1 << 21) /* TCP checksum fail */
501
457 __le32 opts4; 502 __le32 opts4;
458 __le32 opts5; 503 __le32 opts5;
459 __le32 opts6; 504 __le32 opts6;
@@ -463,13 +508,21 @@ struct tx_desc {
463 __le32 opts1; 508 __le32 opts1;
464#define TX_FS (1 << 31) /* First segment of a packet */ 509#define TX_FS (1 << 31) /* First segment of a packet */
465#define TX_LS (1 << 30) /* Final segment of a packet */ 510#define TX_LS (1 << 30) /* Final segment of a packet */
466#define TX_LEN_MASK 0x3ffff 511#define GTSENDV4 (1 << 28)
512#define GTSENDV6 (1 << 27)
513#define GTTCPHO_SHIFT 18
514#define GTTCPHO_MAX 0x7fU
515#define TX_LEN_MAX 0x3ffffU
467 516
468 __le32 opts2; 517 __le32 opts2;
469#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ 518#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
470#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ 519#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
471#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ 520#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
472#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */ 521#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
522#define MSS_SHIFT 17
523#define MSS_MAX 0x7ffU
524#define TCPHO_SHIFT 17
525#define TCPHO_MAX 0x7ffU
473}; 526};
474 527
475struct r8152; 528struct r8152;
@@ -511,11 +564,13 @@ struct r8152 {
511 void (*init)(struct r8152 *); 564 void (*init)(struct r8152 *);
512 int (*enable)(struct r8152 *); 565 int (*enable)(struct r8152 *);
513 void (*disable)(struct r8152 *); 566 void (*disable)(struct r8152 *);
567 void (*up)(struct r8152 *);
514 void (*down)(struct r8152 *); 568 void (*down)(struct r8152 *);
515 void (*unload)(struct r8152 *); 569 void (*unload)(struct r8152 *);
516 } rtl_ops; 570 } rtl_ops;
517 571
518 int intr_interval; 572 int intr_interval;
573 u32 saved_wolopts;
519 u32 msg_enable; 574 u32 msg_enable;
520 u32 tx_qlen; 575 u32 tx_qlen;
521 u16 ocp_base; 576 u16 ocp_base;
@@ -534,12 +589,21 @@ enum rtl_version {
534 RTL_VER_MAX 589 RTL_VER_MAX
535}; 590};
536 591
592enum tx_csum_stat {
593 TX_CSUM_SUCCESS = 0,
594 TX_CSUM_TSO,
595 TX_CSUM_NONE
596};
597
537/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 598/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
538 * The RTL chips use a 64 element hash table based on the Ethernet CRC. 599 * The RTL chips use a 64 element hash table based on the Ethernet CRC.
539 */ 600 */
540static const int multicast_filter_limit = 32; 601static const int multicast_filter_limit = 32;
541static unsigned int rx_buf_sz = 16384; 602static unsigned int rx_buf_sz = 16384;
542 603
604#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \
605 VLAN_ETH_HLEN - VLAN_HLEN)
606
543static 607static
544int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) 608int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
545{ 609{
@@ -577,6 +641,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
577 value, index, tmp, size, 500); 641 value, index, tmp, size, 500);
578 642
579 kfree(tmp); 643 kfree(tmp);
644
580 return ret; 645 return ret;
581} 646}
582 647
@@ -862,11 +927,21 @@ static u16 sram_read(struct r8152 *tp, u16 addr)
862static int read_mii_word(struct net_device *netdev, int phy_id, int reg) 927static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
863{ 928{
864 struct r8152 *tp = netdev_priv(netdev); 929 struct r8152 *tp = netdev_priv(netdev);
930 int ret;
865 931
866 if (phy_id != R8152_PHY_ID) 932 if (phy_id != R8152_PHY_ID)
867 return -EINVAL; 933 return -EINVAL;
868 934
869 return r8152_mdio_read(tp, reg); 935 ret = usb_autopm_get_interface(tp->intf);
936 if (ret < 0)
937 goto out;
938
939 ret = r8152_mdio_read(tp, reg);
940
941 usb_autopm_put_interface(tp->intf);
942
943out:
944 return ret;
870} 945}
871 946
872static 947static
@@ -877,7 +952,12 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
877 if (phy_id != R8152_PHY_ID) 952 if (phy_id != R8152_PHY_ID)
878 return; 953 return;
879 954
955 if (usb_autopm_get_interface(tp->intf) < 0)
956 return;
957
880 r8152_mdio_write(tp, reg, val); 958 r8152_mdio_write(tp, reg, val);
959
960 usb_autopm_put_interface(tp->intf);
881} 961}
882 962
883static 963static
@@ -886,11 +966,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
886static inline void set_ethernet_addr(struct r8152 *tp) 966static inline void set_ethernet_addr(struct r8152 *tp)
887{ 967{
888 struct net_device *dev = tp->netdev; 968 struct net_device *dev = tp->netdev;
969 int ret;
889 u8 node_id[8] = {0}; 970 u8 node_id[8] = {0};
890 971
891 if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0) 972 if (tp->version == RTL_VER_01)
973 ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
974 else
975 ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
976
977 if (ret < 0) {
892 netif_notice(tp, probe, dev, "inet addr fail\n"); 978 netif_notice(tp, probe, dev, "inet addr fail\n");
893 else { 979 } else {
980 if (tp->version != RTL_VER_01) {
981 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
982 CRWECR_CONFIG);
983 pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
984 sizeof(node_id), node_id);
985 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
986 CRWECR_NORAML);
987 }
988
894 memcpy(dev->dev_addr, node_id, dev->addr_len); 989 memcpy(dev->dev_addr, node_id, dev->addr_len);
895 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 990 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
896 } 991 }
@@ -913,15 +1008,9 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
913 return 0; 1008 return 0;
914} 1009}
915 1010
916static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
917{
918 return &dev->stats;
919}
920
921static void read_bulk_callback(struct urb *urb) 1011static void read_bulk_callback(struct urb *urb)
922{ 1012{
923 struct net_device *netdev; 1013 struct net_device *netdev;
924 unsigned long flags;
925 int status = urb->status; 1014 int status = urb->status;
926 struct rx_agg *agg; 1015 struct rx_agg *agg;
927 struct r8152 *tp; 1016 struct r8152 *tp;
@@ -948,14 +1037,16 @@ static void read_bulk_callback(struct urb *urb)
948 if (!netif_carrier_ok(netdev)) 1037 if (!netif_carrier_ok(netdev))
949 return; 1038 return;
950 1039
1040 usb_mark_last_busy(tp->udev);
1041
951 switch (status) { 1042 switch (status) {
952 case 0: 1043 case 0:
953 if (urb->actual_length < ETH_ZLEN) 1044 if (urb->actual_length < ETH_ZLEN)
954 break; 1045 break;
955 1046
956 spin_lock_irqsave(&tp->rx_lock, flags); 1047 spin_lock(&tp->rx_lock);
957 list_add_tail(&agg->list, &tp->rx_done); 1048 list_add_tail(&agg->list, &tp->rx_done);
958 spin_unlock_irqrestore(&tp->rx_lock, flags); 1049 spin_unlock(&tp->rx_lock);
959 tasklet_schedule(&tp->tl); 1050 tasklet_schedule(&tp->tl);
960 return; 1051 return;
961 case -ESHUTDOWN: 1052 case -ESHUTDOWN:
@@ -978,9 +1069,9 @@ static void read_bulk_callback(struct urb *urb)
978 if (result == -ENODEV) { 1069 if (result == -ENODEV) {
979 netif_device_detach(tp->netdev); 1070 netif_device_detach(tp->netdev);
980 } else if (result) { 1071 } else if (result) {
981 spin_lock_irqsave(&tp->rx_lock, flags); 1072 spin_lock(&tp->rx_lock);
982 list_add_tail(&agg->list, &tp->rx_done); 1073 list_add_tail(&agg->list, &tp->rx_done);
983 spin_unlock_irqrestore(&tp->rx_lock, flags); 1074 spin_unlock(&tp->rx_lock);
984 tasklet_schedule(&tp->tl); 1075 tasklet_schedule(&tp->tl);
985 } 1076 }
986} 1077}
@@ -988,7 +1079,7 @@ static void read_bulk_callback(struct urb *urb)
988static void write_bulk_callback(struct urb *urb) 1079static void write_bulk_callback(struct urb *urb)
989{ 1080{
990 struct net_device_stats *stats; 1081 struct net_device_stats *stats;
991 unsigned long flags; 1082 struct net_device *netdev;
992 struct tx_agg *agg; 1083 struct tx_agg *agg;
993 struct r8152 *tp; 1084 struct r8152 *tp;
994 int status = urb->status; 1085 int status = urb->status;
@@ -1001,21 +1092,24 @@ static void write_bulk_callback(struct urb *urb)
1001 if (!tp) 1092 if (!tp)
1002 return; 1093 return;
1003 1094
1004 stats = rtl8152_get_stats(tp->netdev); 1095 netdev = tp->netdev;
1096 stats = &netdev->stats;
1005 if (status) { 1097 if (status) {
1006 if (net_ratelimit()) 1098 if (net_ratelimit())
1007 netdev_warn(tp->netdev, "Tx status %d\n", status); 1099 netdev_warn(netdev, "Tx status %d\n", status);
1008 stats->tx_errors += agg->skb_num; 1100 stats->tx_errors += agg->skb_num;
1009 } else { 1101 } else {
1010 stats->tx_packets += agg->skb_num; 1102 stats->tx_packets += agg->skb_num;
1011 stats->tx_bytes += agg->skb_len; 1103 stats->tx_bytes += agg->skb_len;
1012 } 1104 }
1013 1105
1014 spin_lock_irqsave(&tp->tx_lock, flags); 1106 spin_lock(&tp->tx_lock);
1015 list_add_tail(&agg->list, &tp->tx_free); 1107 list_add_tail(&agg->list, &tp->tx_free);
1016 spin_unlock_irqrestore(&tp->tx_lock, flags); 1108 spin_unlock(&tp->tx_lock);
1017 1109
1018 if (!netif_carrier_ok(tp->netdev)) 1110 usb_autopm_put_interface_async(tp->intf);
1111
1112 if (!netif_carrier_ok(netdev))
1019 return; 1113 return;
1020 1114
1021 if (!test_bit(WORK_ENABLE, &tp->flags)) 1115 if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1220,6 +1314,9 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
1220 struct tx_agg *agg = NULL; 1314 struct tx_agg *agg = NULL;
1221 unsigned long flags; 1315 unsigned long flags;
1222 1316
1317 if (list_empty(&tp->tx_free))
1318 return NULL;
1319
1223 spin_lock_irqsave(&tp->tx_lock, flags); 1320 spin_lock_irqsave(&tp->tx_lock, flags);
1224 if (!list_empty(&tp->tx_free)) { 1321 if (!list_empty(&tp->tx_free)) {
1225 struct list_head *cursor; 1322 struct list_head *cursor;
@@ -1233,24 +1330,138 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
1233 return agg; 1330 return agg;
1234} 1331}
1235 1332
1236static void 1333static inline __be16 get_protocol(struct sk_buff *skb)
1237r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) 1334{
1335 __be16 protocol;
1336
1337 if (skb->protocol == htons(ETH_P_8021Q))
1338 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1339 else
1340 protocol = skb->protocol;
1341
1342 return protocol;
1343}
1344
1345/*
1346 * r8152_csum_workaround()
1347 * The hw limites the value the transport offset. When the offset is out of the
1348 * range, calculate the checksum by sw.
1349 */
1350static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
1351 struct sk_buff_head *list)
1352{
1353 if (skb_shinfo(skb)->gso_size) {
1354 netdev_features_t features = tp->netdev->features;
1355 struct sk_buff_head seg_list;
1356 struct sk_buff *segs, *nskb;
1357
1358 features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
1359 segs = skb_gso_segment(skb, features);
1360 if (IS_ERR(segs) || !segs)
1361 goto drop;
1362
1363 __skb_queue_head_init(&seg_list);
1364
1365 do {
1366 nskb = segs;
1367 segs = segs->next;
1368 nskb->next = NULL;
1369 __skb_queue_tail(&seg_list, nskb);
1370 } while (segs);
1371
1372 skb_queue_splice(&seg_list, list);
1373 dev_kfree_skb(skb);
1374 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1375 if (skb_checksum_help(skb) < 0)
1376 goto drop;
1377
1378 __skb_queue_head(list, skb);
1379 } else {
1380 struct net_device_stats *stats;
1381
1382drop:
1383 stats = &tp->netdev->stats;
1384 stats->tx_dropped++;
1385 dev_kfree_skb(skb);
1386 }
1387}
1388
1389/*
1390 * msdn_giant_send_check()
1391 * According to the document of microsoft, the TCP Pseudo Header excludes the
1392 * packet length for IPv6 TCP large packets.
1393 */
1394static int msdn_giant_send_check(struct sk_buff *skb)
1395{
1396 const struct ipv6hdr *ipv6h;
1397 struct tcphdr *th;
1398 int ret;
1399
1400 ret = skb_cow_head(skb, 0);
1401 if (ret)
1402 return ret;
1403
1404 ipv6h = ipv6_hdr(skb);
1405 th = tcp_hdr(skb);
1406
1407 th->check = 0;
1408 th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
1409
1410 return ret;
1411}
1412
1413static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
1414 struct sk_buff *skb, u32 len, u32 transport_offset)
1238{ 1415{
1239 memset(desc, 0, sizeof(*desc)); 1416 u32 mss = skb_shinfo(skb)->gso_size;
1417 u32 opts1, opts2 = 0;
1418 int ret = TX_CSUM_SUCCESS;
1419
1420 WARN_ON_ONCE(len > TX_LEN_MAX);
1421
1422 opts1 = len | TX_FS | TX_LS;
1423
1424 if (mss) {
1425 if (transport_offset > GTTCPHO_MAX) {
1426 netif_warn(tp, tx_err, tp->netdev,
1427 "Invalid transport offset 0x%x for TSO\n",
1428 transport_offset);
1429 ret = TX_CSUM_TSO;
1430 goto unavailable;
1431 }
1240 1432
1241 desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS); 1433 switch (get_protocol(skb)) {
1434 case htons(ETH_P_IP):
1435 opts1 |= GTSENDV4;
1436 break;
1242 1437
1243 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1438 case htons(ETH_P_IPV6):
1244 __be16 protocol; 1439 if (msdn_giant_send_check(skb)) {
1440 ret = TX_CSUM_TSO;
1441 goto unavailable;
1442 }
1443 opts1 |= GTSENDV6;
1444 break;
1445
1446 default:
1447 WARN_ON_ONCE(1);
1448 break;
1449 }
1450
1451 opts1 |= transport_offset << GTTCPHO_SHIFT;
1452 opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
1453 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1245 u8 ip_protocol; 1454 u8 ip_protocol;
1246 u32 opts2 = 0;
1247 1455
1248 if (skb->protocol == htons(ETH_P_8021Q)) 1456 if (transport_offset > TCPHO_MAX) {
1249 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 1457 netif_warn(tp, tx_err, tp->netdev,
1250 else 1458 "Invalid transport offset 0x%x\n",
1251 protocol = skb->protocol; 1459 transport_offset);
1460 ret = TX_CSUM_NONE;
1461 goto unavailable;
1462 }
1252 1463
1253 switch (protocol) { 1464 switch (get_protocol(skb)) {
1254 case htons(ETH_P_IP): 1465 case htons(ETH_P_IP):
1255 opts2 |= IPV4_CS; 1466 opts2 |= IPV4_CS;
1256 ip_protocol = ip_hdr(skb)->protocol; 1467 ip_protocol = ip_hdr(skb)->protocol;
@@ -1266,24 +1477,34 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
1266 break; 1477 break;
1267 } 1478 }
1268 1479
1269 if (ip_protocol == IPPROTO_TCP) { 1480 if (ip_protocol == IPPROTO_TCP)
1270 opts2 |= TCP_CS; 1481 opts2 |= TCP_CS;
1271 opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17; 1482 else if (ip_protocol == IPPROTO_UDP)
1272 } else if (ip_protocol == IPPROTO_UDP) {
1273 opts2 |= UDP_CS; 1483 opts2 |= UDP_CS;
1274 } else { 1484 else
1275 WARN_ON_ONCE(1); 1485 WARN_ON_ONCE(1);
1276 }
1277 1486
1278 desc->opts2 = cpu_to_le32(opts2); 1487 opts2 |= transport_offset << TCPHO_SHIFT;
1279 } 1488 }
1489
1490 desc->opts2 = cpu_to_le32(opts2);
1491 desc->opts1 = cpu_to_le32(opts1);
1492
1493unavailable:
1494 return ret;
1280} 1495}
1281 1496
1282static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) 1497static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1283{ 1498{
1284 int remain; 1499 struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
1500 int remain, ret;
1285 u8 *tx_data; 1501 u8 *tx_data;
1286 1502
1503 __skb_queue_head_init(&skb_head);
1504 spin_lock(&tx_queue->lock);
1505 skb_queue_splice_init(tx_queue, &skb_head);
1506 spin_unlock(&tx_queue->lock);
1507
1287 tx_data = agg->head; 1508 tx_data = agg->head;
1288 agg->skb_num = agg->skb_len = 0; 1509 agg->skb_num = agg->skb_len = 0;
1289 remain = rx_buf_sz; 1510 remain = rx_buf_sz;
@@ -1292,32 +1513,56 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1292 struct tx_desc *tx_desc; 1513 struct tx_desc *tx_desc;
1293 struct sk_buff *skb; 1514 struct sk_buff *skb;
1294 unsigned int len; 1515 unsigned int len;
1516 u32 offset;
1295 1517
1296 skb = skb_dequeue(&tp->tx_queue); 1518 skb = __skb_dequeue(&skb_head);
1297 if (!skb) 1519 if (!skb)
1298 break; 1520 break;
1299 1521
1300 remain -= sizeof(*tx_desc); 1522 len = skb->len + sizeof(*tx_desc);
1301 len = skb->len; 1523
1302 if (remain < len) { 1524 if (len > remain) {
1303 skb_queue_head(&tp->tx_queue, skb); 1525 __skb_queue_head(&skb_head, skb);
1304 break; 1526 break;
1305 } 1527 }
1306 1528
1307 tx_data = tx_agg_align(tx_data); 1529 tx_data = tx_agg_align(tx_data);
1308 tx_desc = (struct tx_desc *)tx_data; 1530 tx_desc = (struct tx_desc *)tx_data;
1531
1532 offset = (u32)skb_transport_offset(skb);
1533
1534 if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
1535 r8152_csum_workaround(tp, skb, &skb_head);
1536 continue;
1537 }
1538
1309 tx_data += sizeof(*tx_desc); 1539 tx_data += sizeof(*tx_desc);
1310 1540
1311 r8152_tx_csum(tp, tx_desc, skb); 1541 len = skb->len;
1312 memcpy(tx_data, skb->data, len); 1542 if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
1313 agg->skb_num++; 1543 struct net_device_stats *stats = &tp->netdev->stats;
1544
1545 stats->tx_dropped++;
1546 dev_kfree_skb_any(skb);
1547 tx_data -= sizeof(*tx_desc);
1548 continue;
1549 }
1550
1551 tx_data += len;
1314 agg->skb_len += len; 1552 agg->skb_len += len;
1553 agg->skb_num++;
1554
1315 dev_kfree_skb_any(skb); 1555 dev_kfree_skb_any(skb);
1316 1556
1317 tx_data += len;
1318 remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); 1557 remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
1319 } 1558 }
1320 1559
1560 if (!skb_queue_empty(&skb_head)) {
1561 spin_lock(&tx_queue->lock);
1562 skb_queue_splice(&skb_head, tx_queue);
1563 spin_unlock(&tx_queue->lock);
1564 }
1565
1321 netif_tx_lock(tp->netdev); 1566 netif_tx_lock(tp->netdev);
1322 1567
1323 if (netif_queue_stopped(tp->netdev) && 1568 if (netif_queue_stopped(tp->netdev) &&
@@ -1326,20 +1571,67 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1326 1571
1327 netif_tx_unlock(tp->netdev); 1572 netif_tx_unlock(tp->netdev);
1328 1573
1574 ret = usb_autopm_get_interface_async(tp->intf);
1575 if (ret < 0)
1576 goto out_tx_fill;
1577
1329 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), 1578 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
1330 agg->head, (int)(tx_data - (u8 *)agg->head), 1579 agg->head, (int)(tx_data - (u8 *)agg->head),
1331 (usb_complete_t)write_bulk_callback, agg); 1580 (usb_complete_t)write_bulk_callback, agg);
1332 1581
1333 return usb_submit_urb(agg->urb, GFP_ATOMIC); 1582 ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
1583 if (ret < 0)
1584 usb_autopm_put_interface_async(tp->intf);
1585
1586out_tx_fill:
1587 return ret;
1588}
1589
1590static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1591{
1592 u8 checksum = CHECKSUM_NONE;
1593 u32 opts2, opts3;
1594
1595 if (tp->version == RTL_VER_01)
1596 goto return_result;
1597
1598 opts2 = le32_to_cpu(rx_desc->opts2);
1599 opts3 = le32_to_cpu(rx_desc->opts3);
1600
1601 if (opts2 & RD_IPV4_CS) {
1602 if (opts3 & IPF)
1603 checksum = CHECKSUM_NONE;
1604 else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
1605 checksum = CHECKSUM_NONE;
1606 else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
1607 checksum = CHECKSUM_NONE;
1608 else
1609 checksum = CHECKSUM_UNNECESSARY;
1610 } else if (RD_IPV6_CS) {
1611 if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
1612 checksum = CHECKSUM_UNNECESSARY;
1613 else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
1614 checksum = CHECKSUM_UNNECESSARY;
1615 }
1616
1617return_result:
1618 return checksum;
1334} 1619}
1335 1620
1336static void rx_bottom(struct r8152 *tp) 1621static void rx_bottom(struct r8152 *tp)
1337{ 1622{
1338 unsigned long flags; 1623 unsigned long flags;
1339 struct list_head *cursor, *next; 1624 struct list_head *cursor, *next, rx_queue;
1340 1625
1626 if (list_empty(&tp->rx_done))
1627 return;
1628
1629 INIT_LIST_HEAD(&rx_queue);
1341 spin_lock_irqsave(&tp->rx_lock, flags); 1630 spin_lock_irqsave(&tp->rx_lock, flags);
1342 list_for_each_safe(cursor, next, &tp->rx_done) { 1631 list_splice_init(&tp->rx_done, &rx_queue);
1632 spin_unlock_irqrestore(&tp->rx_lock, flags);
1633
1634 list_for_each_safe(cursor, next, &rx_queue) {
1343 struct rx_desc *rx_desc; 1635 struct rx_desc *rx_desc;
1344 struct rx_agg *agg; 1636 struct rx_agg *agg;
1345 int len_used = 0; 1637 int len_used = 0;
@@ -1348,7 +1640,6 @@ static void rx_bottom(struct r8152 *tp)
1348 int ret; 1640 int ret;
1349 1641
1350 list_del_init(cursor); 1642 list_del_init(cursor);
1351 spin_unlock_irqrestore(&tp->rx_lock, flags);
1352 1643
1353 agg = list_entry(cursor, struct rx_agg, list); 1644 agg = list_entry(cursor, struct rx_agg, list);
1354 urb = agg->urb; 1645 urb = agg->urb;
@@ -1361,7 +1652,7 @@ static void rx_bottom(struct r8152 *tp)
1361 1652
1362 while (urb->actual_length > len_used) { 1653 while (urb->actual_length > len_used) {
1363 struct net_device *netdev = tp->netdev; 1654 struct net_device *netdev = tp->netdev;
1364 struct net_device_stats *stats; 1655 struct net_device_stats *stats = &netdev->stats;
1365 unsigned int pkt_len; 1656 unsigned int pkt_len;
1366 struct sk_buff *skb; 1657 struct sk_buff *skb;
1367 1658
@@ -1373,23 +1664,24 @@ static void rx_bottom(struct r8152 *tp)
1373 if (urb->actual_length < len_used) 1664 if (urb->actual_length < len_used)
1374 break; 1665 break;
1375 1666
1376 stats = rtl8152_get_stats(netdev);
1377
1378 pkt_len -= CRC_SIZE; 1667 pkt_len -= CRC_SIZE;
1379 rx_data += sizeof(struct rx_desc); 1668 rx_data += sizeof(struct rx_desc);
1380 1669
1381 skb = netdev_alloc_skb_ip_align(netdev, pkt_len); 1670 skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
1382 if (!skb) { 1671 if (!skb) {
1383 stats->rx_dropped++; 1672 stats->rx_dropped++;
1384 break; 1673 goto find_next_rx;
1385 } 1674 }
1675
1676 skb->ip_summed = r8152_rx_csum(tp, rx_desc);
1386 memcpy(skb->data, rx_data, pkt_len); 1677 memcpy(skb->data, rx_data, pkt_len);
1387 skb_put(skb, pkt_len); 1678 skb_put(skb, pkt_len);
1388 skb->protocol = eth_type_trans(skb, netdev); 1679 skb->protocol = eth_type_trans(skb, netdev);
1389 netif_rx(skb); 1680 netif_receive_skb(skb);
1390 stats->rx_packets++; 1681 stats->rx_packets++;
1391 stats->rx_bytes += pkt_len; 1682 stats->rx_bytes += pkt_len;
1392 1683
1684find_next_rx:
1393 rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE); 1685 rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
1394 rx_desc = (struct rx_desc *)rx_data; 1686 rx_desc = (struct rx_desc *)rx_data;
1395 len_used = (int)(rx_data - (u8 *)agg->head); 1687 len_used = (int)(rx_data - (u8 *)agg->head);
@@ -1398,13 +1690,13 @@ static void rx_bottom(struct r8152 *tp)
1398 1690
1399submit: 1691submit:
1400 ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); 1692 ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
1401 spin_lock_irqsave(&tp->rx_lock, flags);
1402 if (ret && ret != -ENODEV) { 1693 if (ret && ret != -ENODEV) {
1403 list_add_tail(&agg->list, next); 1694 spin_lock_irqsave(&tp->rx_lock, flags);
1695 list_add_tail(&agg->list, &tp->rx_done);
1696 spin_unlock_irqrestore(&tp->rx_lock, flags);
1404 tasklet_schedule(&tp->tl); 1697 tasklet_schedule(&tp->tl);
1405 } 1698 }
1406 } 1699 }
1407 spin_unlock_irqrestore(&tp->rx_lock, flags);
1408} 1700}
1409 1701
1410static void tx_bottom(struct r8152 *tp) 1702static void tx_bottom(struct r8152 *tp)
@@ -1423,19 +1715,18 @@ static void tx_bottom(struct r8152 *tp)
1423 1715
1424 res = r8152_tx_agg_fill(tp, agg); 1716 res = r8152_tx_agg_fill(tp, agg);
1425 if (res) { 1717 if (res) {
1426 struct net_device_stats *stats; 1718 struct net_device *netdev = tp->netdev;
1427 struct net_device *netdev;
1428 unsigned long flags;
1429
1430 netdev = tp->netdev;
1431 stats = rtl8152_get_stats(netdev);
1432 1719
1433 if (res == -ENODEV) { 1720 if (res == -ENODEV) {
1434 netif_device_detach(netdev); 1721 netif_device_detach(netdev);
1435 } else { 1722 } else {
1723 struct net_device_stats *stats = &netdev->stats;
1724 unsigned long flags;
1725
1436 netif_warn(tp, tx_err, netdev, 1726 netif_warn(tp, tx_err, netdev,
1437 "failed tx_urb %d\n", res); 1727 "failed tx_urb %d\n", res);
1438 stats->tx_dropped += agg->skb_num; 1728 stats->tx_dropped += agg->skb_num;
1729
1439 spin_lock_irqsave(&tp->tx_lock, flags); 1730 spin_lock_irqsave(&tp->tx_lock, flags);
1440 list_add_tail(&agg->list, &tp->tx_free); 1731 list_add_tail(&agg->list, &tp->tx_free);
1441 spin_unlock_irqrestore(&tp->tx_lock, flags); 1732 spin_unlock_irqrestore(&tp->tx_lock, flags);
@@ -1475,6 +1766,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
1475 return usb_submit_urb(agg->urb, mem_flags); 1766 return usb_submit_urb(agg->urb, mem_flags);
1476} 1767}
1477 1768
1769static void rtl_drop_queued_tx(struct r8152 *tp)
1770{
1771 struct net_device_stats *stats = &tp->netdev->stats;
1772 struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
1773 struct sk_buff *skb;
1774
1775 if (skb_queue_empty(tx_queue))
1776 return;
1777
1778 __skb_queue_head_init(&skb_head);
1779 spin_lock_bh(&tx_queue->lock);
1780 skb_queue_splice_init(tx_queue, &skb_head);
1781 spin_unlock_bh(&tx_queue->lock);
1782
1783 while ((skb = __skb_dequeue(&skb_head))) {
1784 dev_kfree_skb(skb);
1785 stats->tx_dropped++;
1786 }
1787}
1788
1478static void rtl8152_tx_timeout(struct net_device *netdev) 1789static void rtl8152_tx_timeout(struct net_device *netdev)
1479{ 1790{
1480 struct r8152 *tp = netdev_priv(netdev); 1791 struct r8152 *tp = netdev_priv(netdev);
@@ -1538,7 +1849,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
1538} 1849}
1539 1850
1540static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, 1851static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
1541 struct net_device *netdev) 1852 struct net_device *netdev)
1542{ 1853{
1543 struct r8152 *tp = netdev_priv(netdev); 1854 struct r8152 *tp = netdev_priv(netdev);
1544 1855
@@ -1546,13 +1857,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
1546 1857
1547 skb_queue_tail(&tp->tx_queue, skb); 1858 skb_queue_tail(&tp->tx_queue, skb);
1548 1859
1549 if (list_empty(&tp->tx_free) && 1860 if (!list_empty(&tp->tx_free)) {
1550 skb_queue_len(&tp->tx_queue) > tp->tx_qlen) 1861 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
1862 set_bit(SCHEDULE_TASKLET, &tp->flags);
1863 schedule_delayed_work(&tp->schedule, 0);
1864 } else {
1865 usb_mark_last_busy(tp->udev);
1866 tasklet_schedule(&tp->tl);
1867 }
1868 } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
1551 netif_stop_queue(netdev); 1869 netif_stop_queue(netdev);
1552 1870
1553 if (!list_empty(&tp->tx_free))
1554 tasklet_schedule(&tp->tl);
1555
1556 return NETDEV_TX_OK; 1871 return NETDEV_TX_OK;
1557} 1872}
1558 1873
@@ -1610,6 +1925,18 @@ static void rtl_set_eee_plus(struct r8152 *tp)
1610 } 1925 }
1611} 1926}
1612 1927
1928static void rxdy_gated_en(struct r8152 *tp, bool enable)
1929{
1930 u32 ocp_data;
1931
1932 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
1933 if (enable)
1934 ocp_data |= RXDY_GATED_EN;
1935 else
1936 ocp_data &= ~RXDY_GATED_EN;
1937 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1938}
1939
1613static int rtl_enable(struct r8152 *tp) 1940static int rtl_enable(struct r8152 *tp)
1614{ 1941{
1615 u32 ocp_data; 1942 u32 ocp_data;
@@ -1621,9 +1948,7 @@ static int rtl_enable(struct r8152 *tp)
1621 ocp_data |= CR_RE | CR_TE; 1948 ocp_data |= CR_RE | CR_TE;
1622 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); 1949 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
1623 1950
1624 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); 1951 rxdy_gated_en(tp, false);
1625 ocp_data &= ~RXDY_GATED_EN;
1626 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1627 1952
1628 INIT_LIST_HEAD(&tp->rx_done); 1953 INIT_LIST_HEAD(&tp->rx_done);
1629 ret = 0; 1954 ret = 0;
@@ -1678,8 +2003,6 @@ static int rtl8153_enable(struct r8152 *tp)
1678 2003
1679static void rtl8152_disable(struct r8152 *tp) 2004static void rtl8152_disable(struct r8152 *tp)
1680{ 2005{
1681 struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
1682 struct sk_buff *skb;
1683 u32 ocp_data; 2006 u32 ocp_data;
1684 int i; 2007 int i;
1685 2008
@@ -1687,17 +2010,12 @@ static void rtl8152_disable(struct r8152 *tp)
1687 ocp_data &= ~RCR_ACPT_ALL; 2010 ocp_data &= ~RCR_ACPT_ALL;
1688 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2011 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
1689 2012
1690 while ((skb = skb_dequeue(&tp->tx_queue))) { 2013 rtl_drop_queued_tx(tp);
1691 dev_kfree_skb(skb);
1692 stats->tx_dropped++;
1693 }
1694 2014
1695 for (i = 0; i < RTL8152_MAX_TX; i++) 2015 for (i = 0; i < RTL8152_MAX_TX; i++)
1696 usb_kill_urb(tp->tx_info[i].urb); 2016 usb_kill_urb(tp->tx_info[i].urb);
1697 2017
1698 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); 2018 rxdy_gated_en(tp, true);
1699 ocp_data |= RXDY_GATED_EN;
1700 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1701 2019
1702 for (i = 0; i < 1000; i++) { 2020 for (i = 0; i < 1000; i++) {
1703 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); 2021 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -1718,18 +2036,209 @@ static void rtl8152_disable(struct r8152 *tp)
1718 rtl8152_nic_reset(tp); 2036 rtl8152_nic_reset(tp);
1719} 2037}
1720 2038
2039static void r8152_power_cut_en(struct r8152 *tp, bool enable)
2040{
2041 u32 ocp_data;
2042
2043 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
2044 if (enable)
2045 ocp_data |= POWER_CUT;
2046 else
2047 ocp_data &= ~POWER_CUT;
2048 ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
2049
2050 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
2051 ocp_data &= ~RESUME_INDICATE;
2052 ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
2053}
2054
2055#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
2056
2057static u32 __rtl_get_wol(struct r8152 *tp)
2058{
2059 u32 ocp_data;
2060 u32 wolopts = 0;
2061
2062 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
2063 if (!(ocp_data & LAN_WAKE_EN))
2064 return 0;
2065
2066 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2067 if (ocp_data & LINK_ON_WAKE_EN)
2068 wolopts |= WAKE_PHY;
2069
2070 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
2071 if (ocp_data & UWF_EN)
2072 wolopts |= WAKE_UCAST;
2073 if (ocp_data & BWF_EN)
2074 wolopts |= WAKE_BCAST;
2075 if (ocp_data & MWF_EN)
2076 wolopts |= WAKE_MCAST;
2077
2078 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
2079 if (ocp_data & MAGIC_EN)
2080 wolopts |= WAKE_MAGIC;
2081
2082 return wolopts;
2083}
2084
2085static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2086{
2087 u32 ocp_data;
2088
2089 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
2090
2091 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2092 ocp_data &= ~LINK_ON_WAKE_EN;
2093 if (wolopts & WAKE_PHY)
2094 ocp_data |= LINK_ON_WAKE_EN;
2095 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
2096
2097 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
2098 ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
2099 if (wolopts & WAKE_UCAST)
2100 ocp_data |= UWF_EN;
2101 if (wolopts & WAKE_BCAST)
2102 ocp_data |= BWF_EN;
2103 if (wolopts & WAKE_MCAST)
2104 ocp_data |= MWF_EN;
2105 if (wolopts & WAKE_ANY)
2106 ocp_data |= LAN_WAKE_EN;
2107 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
2108
2109 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2110
2111 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
2112 ocp_data &= ~MAGIC_EN;
2113 if (wolopts & WAKE_MAGIC)
2114 ocp_data |= MAGIC_EN;
2115 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
2116
2117 if (wolopts & WAKE_ANY)
2118 device_set_wakeup_enable(&tp->udev->dev, true);
2119 else
2120 device_set_wakeup_enable(&tp->udev->dev, false);
2121}
2122
2123static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2124{
2125 if (enable) {
2126 u32 ocp_data;
2127
2128 __rtl_set_wol(tp, WAKE_ANY);
2129
2130 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
2131
2132 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2133 ocp_data |= LINK_OFF_WAKE_EN;
2134 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
2135
2136 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2137 } else {
2138 __rtl_set_wol(tp, tp->saved_wolopts);
2139 }
2140}
2141
2142static void rtl_phy_reset(struct r8152 *tp)
2143{
2144 u16 data;
2145 int i;
2146
2147 clear_bit(PHY_RESET, &tp->flags);
2148
2149 data = r8152_mdio_read(tp, MII_BMCR);
2150
2151 /* don't reset again before the previous one complete */
2152 if (data & BMCR_RESET)
2153 return;
2154
2155 data |= BMCR_RESET;
2156 r8152_mdio_write(tp, MII_BMCR, data);
2157
2158 for (i = 0; i < 50; i++) {
2159 msleep(20);
2160 if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
2161 break;
2162 }
2163}
2164
2165static void rtl_clear_bp(struct r8152 *tp)
2166{
2167 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
2168 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
2169 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
2170 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
2171 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
2172 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
2173 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
2174 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
2175 mdelay(3);
2176 ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
2177 ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
2178}
2179
2180static void r8153_clear_bp(struct r8152 *tp)
2181{
2182 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
2183 ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
2184 rtl_clear_bp(tp);
2185}
2186
2187static void r8153_teredo_off(struct r8152 *tp)
2188{
2189 u32 ocp_data;
2190
2191 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
2192 ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
2193 ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
2194
2195 ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
2196 ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
2197 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
2198}
2199
2200static void r8152b_disable_aldps(struct r8152 *tp)
2201{
2202 ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
2203 msleep(20);
2204}
2205
2206static inline void r8152b_enable_aldps(struct r8152 *tp)
2207{
2208 ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
2209 LINKENA | DIS_SDSAVE);
2210}
2211
2212static void r8152b_hw_phy_cfg(struct r8152 *tp)
2213{
2214 u16 data;
2215
2216 data = r8152_mdio_read(tp, MII_BMCR);
2217 if (data & BMCR_PDOWN) {
2218 data &= ~BMCR_PDOWN;
2219 r8152_mdio_write(tp, MII_BMCR, data);
2220 }
2221
2222 r8152b_disable_aldps(tp);
2223
2224 rtl_clear_bp(tp);
2225
2226 r8152b_enable_aldps(tp);
2227 set_bit(PHY_RESET, &tp->flags);
2228}
2229
1721static void r8152b_exit_oob(struct r8152 *tp) 2230static void r8152b_exit_oob(struct r8152 *tp)
1722{ 2231{
1723 u32 ocp_data; 2232 u32 ocp_data;
1724 int i; 2233 int i;
1725 2234
1726 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2235 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
1727 ocp_data &= ~RCR_ACPT_ALL; 2236 ocp_data &= ~RCR_ACPT_ALL;
1728 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2237 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
1729 2238
1730 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); 2239 rxdy_gated_en(tp, true);
1731 ocp_data |= RXDY_GATED_EN; 2240 r8153_teredo_off(tp);
1732 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); 2241 r8152b_hw_phy_cfg(tp);
1733 2242
1734 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2243 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
1735 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); 2244 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -1835,10 +2344,6 @@ static void r8152b_enter_oob(struct r8152 *tp)
1835 2344
1836 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); 2345 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
1837 2346
1838 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
1839 ocp_data |= MAGIC_EN;
1840 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
1841
1842 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); 2347 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
1843 ocp_data |= CPCR_RX_VLAN; 2348 ocp_data |= CPCR_RX_VLAN;
1844 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); 2349 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
@@ -1851,36 +2356,26 @@ static void r8152b_enter_oob(struct r8152 *tp)
1851 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; 2356 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
1852 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); 2357 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
1853 2358
1854 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN); 2359 rxdy_gated_en(tp, false);
1855
1856 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
1857 ocp_data &= ~RXDY_GATED_EN;
1858 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1859 2360
1860 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2361 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
1861 ocp_data |= RCR_APM | RCR_AM | RCR_AB; 2362 ocp_data |= RCR_APM | RCR_AM | RCR_AB;
1862 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2363 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
1863} 2364}
1864 2365
1865static void r8152b_disable_aldps(struct r8152 *tp)
1866{
1867 ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
1868 msleep(20);
1869}
1870
1871static inline void r8152b_enable_aldps(struct r8152 *tp)
1872{
1873 ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
1874 LINKENA | DIS_SDSAVE);
1875}
1876
1877static void r8153_hw_phy_cfg(struct r8152 *tp) 2366static void r8153_hw_phy_cfg(struct r8152 *tp)
1878{ 2367{
1879 u32 ocp_data; 2368 u32 ocp_data;
1880 u16 data; 2369 u16 data;
1881 2370
1882 ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); 2371 ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
1883 r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE); 2372 data = r8152_mdio_read(tp, MII_BMCR);
2373 if (data & BMCR_PDOWN) {
2374 data &= ~BMCR_PDOWN;
2375 r8152_mdio_write(tp, MII_BMCR, data);
2376 }
2377
2378 r8153_clear_bp(tp);
1884 2379
1885 if (tp->version == RTL_VER_03) { 2380 if (tp->version == RTL_VER_03) {
1886 data = ocp_reg_read(tp, OCP_EEE_CFG); 2381 data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -1916,9 +2411,11 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
1916 data = sram_read(tp, SRAM_10M_AMP2); 2411 data = sram_read(tp, SRAM_10M_AMP2);
1917 data |= AMP_DN; 2412 data |= AMP_DN;
1918 sram_write(tp, SRAM_10M_AMP2, data); 2413 sram_write(tp, SRAM_10M_AMP2, data);
2414
2415 set_bit(PHY_RESET, &tp->flags);
1919} 2416}
1920 2417
1921static void r8153_u1u2en(struct r8152 *tp, int enable) 2418static void r8153_u1u2en(struct r8152 *tp, bool enable)
1922{ 2419{
1923 u8 u1u2[8]; 2420 u8 u1u2[8];
1924 2421
@@ -1930,7 +2427,7 @@ static void r8153_u1u2en(struct r8152 *tp, int enable)
1930 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); 2427 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
1931} 2428}
1932 2429
1933static void r8153_u2p3en(struct r8152 *tp, int enable) 2430static void r8153_u2p3en(struct r8152 *tp, bool enable)
1934{ 2431{
1935 u32 ocp_data; 2432 u32 ocp_data;
1936 2433
@@ -1942,7 +2439,7 @@ static void r8153_u2p3en(struct r8152 *tp, int enable)
1942 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); 2439 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
1943} 2440}
1944 2441
1945static void r8153_power_cut_en(struct r8152 *tp, int enable) 2442static void r8153_power_cut_en(struct r8152 *tp, bool enable)
1946{ 2443{
1947 u32 ocp_data; 2444 u32 ocp_data;
1948 2445
@@ -1958,28 +2455,12 @@ static void r8153_power_cut_en(struct r8152 *tp, int enable)
1958 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); 2455 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
1959} 2456}
1960 2457
1961static void r8153_teredo_off(struct r8152 *tp)
1962{
1963 u32 ocp_data;
1964
1965 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
1966 ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
1967 ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
1968
1969 ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
1970 ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
1971 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
1972}
1973
1974static void r8153_first_init(struct r8152 *tp) 2458static void r8153_first_init(struct r8152 *tp)
1975{ 2459{
1976 u32 ocp_data; 2460 u32 ocp_data;
1977 int i; 2461 int i;
1978 2462
1979 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); 2463 rxdy_gated_en(tp, true);
1980 ocp_data |= RXDY_GATED_EN;
1981 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1982
1983 r8153_teredo_off(tp); 2464 r8153_teredo_off(tp);
1984 2465
1985 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2466 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -2072,10 +2553,6 @@ static void r8153_enter_oob(struct r8152 *tp)
2072 2553
2073 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); 2554 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
2074 2555
2075 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
2076 ocp_data |= MAGIC_EN;
2077 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
2078
2079 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); 2556 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
2080 ocp_data &= ~TEREDO_WAKE_MASK; 2557 ocp_data &= ~TEREDO_WAKE_MASK;
2081 ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); 2558 ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
@@ -2092,11 +2569,7 @@ static void r8153_enter_oob(struct r8152 *tp)
2092 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; 2569 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
2093 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); 2570 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
2094 2571
2095 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN); 2572 rxdy_gated_en(tp, false);
2096
2097 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
2098 ocp_data &= ~RXDY_GATED_EN;
2099 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
2100 2573
2101 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2574 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
2102 ocp_data |= RCR_APM | RCR_AM | RCR_AB; 2575 ocp_data |= RCR_APM | RCR_AM | RCR_AB;
@@ -2187,12 +2660,26 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
2187 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; 2660 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
2188 } 2661 }
2189 2662
2663 if (test_bit(PHY_RESET, &tp->flags))
2664 bmcr |= BMCR_RESET;
2665
2190 if (tp->mii.supports_gmii) 2666 if (tp->mii.supports_gmii)
2191 r8152_mdio_write(tp, MII_CTRL1000, gbcr); 2667 r8152_mdio_write(tp, MII_CTRL1000, gbcr);
2192 2668
2193 r8152_mdio_write(tp, MII_ADVERTISE, anar); 2669 r8152_mdio_write(tp, MII_ADVERTISE, anar);
2194 r8152_mdio_write(tp, MII_BMCR, bmcr); 2670 r8152_mdio_write(tp, MII_BMCR, bmcr);
2195 2671
2672 if (test_bit(PHY_RESET, &tp->flags)) {
2673 int i;
2674
2675 clear_bit(PHY_RESET, &tp->flags);
2676 for (i = 0; i < 50; i++) {
2677 msleep(20);
2678 if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
2679 break;
2680 }
2681 }
2682
2196out: 2683out:
2197 2684
2198 return ret; 2685 return ret;
@@ -2200,12 +2687,7 @@ out:
2200 2687
2201static void rtl8152_down(struct r8152 *tp) 2688static void rtl8152_down(struct r8152 *tp)
2202{ 2689{
2203 u32 ocp_data; 2690 r8152_power_cut_en(tp, false);
2204
2205 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
2206 ocp_data &= ~POWER_CUT;
2207 ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
2208
2209 r8152b_disable_aldps(tp); 2691 r8152b_disable_aldps(tp);
2210 r8152b_enter_oob(tp); 2692 r8152b_enter_oob(tp);
2211 r8152b_enable_aldps(tp); 2693 r8152b_enable_aldps(tp);
@@ -2213,8 +2695,8 @@ static void rtl8152_down(struct r8152 *tp)
2213 2695
2214static void rtl8153_down(struct r8152 *tp) 2696static void rtl8153_down(struct r8152 *tp)
2215{ 2697{
2216 r8153_u1u2en(tp, 0); 2698 r8153_u1u2en(tp, false);
2217 r8153_power_cut_en(tp, 0); 2699 r8153_power_cut_en(tp, false);
2218 r8153_disable_aldps(tp); 2700 r8153_disable_aldps(tp);
2219 r8153_enter_oob(tp); 2701 r8153_enter_oob(tp);
2220 r8153_enable_aldps(tp); 2702 r8153_enable_aldps(tp);
@@ -2249,6 +2731,9 @@ static void rtl_work_func_t(struct work_struct *work)
2249{ 2731{
2250 struct r8152 *tp = container_of(work, struct r8152, schedule.work); 2732 struct r8152 *tp = container_of(work, struct r8152, schedule.work);
2251 2733
2734 if (usb_autopm_get_interface(tp->intf) < 0)
2735 return;
2736
2252 if (!test_bit(WORK_ENABLE, &tp->flags)) 2737 if (!test_bit(WORK_ENABLE, &tp->flags))
2253 goto out1; 2738 goto out1;
2254 2739
@@ -2261,8 +2746,17 @@ static void rtl_work_func_t(struct work_struct *work)
2261 if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) 2746 if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
2262 _rtl8152_set_rx_mode(tp->netdev); 2747 _rtl8152_set_rx_mode(tp->netdev);
2263 2748
2749 if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
2750 (tp->speed & LINK_STATUS)) {
2751 clear_bit(SCHEDULE_TASKLET, &tp->flags);
2752 tasklet_schedule(&tp->tl);
2753 }
2754
2755 if (test_bit(PHY_RESET, &tp->flags))
2756 rtl_phy_reset(tp);
2757
2264out1: 2758out1:
2265 return; 2759 usb_autopm_put_interface(tp->intf);
2266} 2760}
2267 2761
2268static int rtl8152_open(struct net_device *netdev) 2762static int rtl8152_open(struct net_device *netdev)
@@ -2270,6 +2764,27 @@ static int rtl8152_open(struct net_device *netdev)
2270 struct r8152 *tp = netdev_priv(netdev); 2764 struct r8152 *tp = netdev_priv(netdev);
2271 int res = 0; 2765 int res = 0;
2272 2766
2767 res = alloc_all_mem(tp);
2768 if (res)
2769 goto out;
2770
2771 res = usb_autopm_get_interface(tp->intf);
2772 if (res < 0) {
2773 free_all_mem(tp);
2774 goto out;
2775 }
2776
2777 /* The WORK_ENABLE may be set when autoresume occurs */
2778 if (test_bit(WORK_ENABLE, &tp->flags)) {
2779 clear_bit(WORK_ENABLE, &tp->flags);
2780 usb_kill_urb(tp->intr_urb);
2781 cancel_delayed_work_sync(&tp->schedule);
2782 if (tp->speed & LINK_STATUS)
2783 tp->rtl_ops.disable(tp);
2784 }
2785
2786 tp->rtl_ops.up(tp);
2787
2273 rtl8152_set_speed(tp, AUTONEG_ENABLE, 2788 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2274 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, 2789 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2275 DUPLEX_FULL); 2790 DUPLEX_FULL);
@@ -2277,15 +2792,19 @@ static int rtl8152_open(struct net_device *netdev)
2277 netif_carrier_off(netdev); 2792 netif_carrier_off(netdev);
2278 netif_start_queue(netdev); 2793 netif_start_queue(netdev);
2279 set_bit(WORK_ENABLE, &tp->flags); 2794 set_bit(WORK_ENABLE, &tp->flags);
2795
2280 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL); 2796 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
2281 if (res) { 2797 if (res) {
2282 if (res == -ENODEV) 2798 if (res == -ENODEV)
2283 netif_device_detach(tp->netdev); 2799 netif_device_detach(tp->netdev);
2284 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", 2800 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
2285 res); 2801 res);
2802 free_all_mem(tp);
2286 } 2803 }
2287 2804
2805 usb_autopm_put_interface(tp->intf);
2288 2806
2807out:
2289 return res; 2808 return res;
2290} 2809}
2291 2810
@@ -2298,33 +2817,30 @@ static int rtl8152_close(struct net_device *netdev)
2298 usb_kill_urb(tp->intr_urb); 2817 usb_kill_urb(tp->intr_urb);
2299 cancel_delayed_work_sync(&tp->schedule); 2818 cancel_delayed_work_sync(&tp->schedule);
2300 netif_stop_queue(netdev); 2819 netif_stop_queue(netdev);
2301 tasklet_disable(&tp->tl);
2302 tp->rtl_ops.disable(tp);
2303 tasklet_enable(&tp->tl);
2304 2820
2305 return res; 2821 res = usb_autopm_get_interface(tp->intf);
2306} 2822 if (res < 0) {
2823 rtl_drop_queued_tx(tp);
2824 } else {
2825 /*
2826 * The autosuspend may have been enabled and wouldn't
2827 * be disable when autoresume occurs, because the
2828 * netif_running() would be false.
2829 */
2830 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
2831 rtl_runtime_suspend_enable(tp, false);
2832 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
2833 }
2307 2834
2308static void rtl_clear_bp(struct r8152 *tp) 2835 tasklet_disable(&tp->tl);
2309{ 2836 tp->rtl_ops.down(tp);
2310 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); 2837 tasklet_enable(&tp->tl);
2311 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); 2838 usb_autopm_put_interface(tp->intf);
2312 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); 2839 }
2313 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
2314 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
2315 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
2316 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
2317 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
2318 mdelay(3);
2319 ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
2320 ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
2321}
2322 2840
2323static void r8153_clear_bp(struct r8152 *tp) 2841 free_all_mem(tp);
2324{ 2842
2325 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); 2843 return res;
2326 ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
2327 rtl_clear_bp(tp);
2328} 2844}
2329 2845
2330static void r8152b_enable_eee(struct r8152 *tp) 2846static void r8152b_enable_eee(struct r8152 *tp)
@@ -2375,18 +2891,18 @@ static void r8152b_enable_fc(struct r8152 *tp)
2375 r8152_mdio_write(tp, MII_ADVERTISE, anar); 2891 r8152_mdio_write(tp, MII_ADVERTISE, anar);
2376} 2892}
2377 2893
2378static void r8152b_hw_phy_cfg(struct r8152 *tp) 2894static void rtl_tally_reset(struct r8152 *tp)
2379{ 2895{
2380 r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE); 2896 u32 ocp_data;
2381 r8152b_disable_aldps(tp); 2897
2898 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY);
2899 ocp_data |= TALLY_RESET;
2900 ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data);
2382} 2901}
2383 2902
2384static void r8152b_init(struct r8152 *tp) 2903static void r8152b_init(struct r8152 *tp)
2385{ 2904{
2386 u32 ocp_data; 2905 u32 ocp_data;
2387 int i;
2388
2389 rtl_clear_bp(tp);
2390 2906
2391 if (tp->version == RTL_VER_01) { 2907 if (tp->version == RTL_VER_01) {
2392 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); 2908 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
@@ -2394,17 +2910,7 @@ static void r8152b_init(struct r8152 *tp)
2394 ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); 2910 ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
2395 } 2911 }
2396 2912
2397 r8152b_hw_phy_cfg(tp); 2913 r8152_power_cut_en(tp, false);
2398
2399 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
2400 ocp_data &= ~POWER_CUT;
2401 ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
2402
2403 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
2404 ocp_data &= ~RESUME_INDICATE;
2405 ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
2406
2407 r8152b_exit_oob(tp);
2408 2914
2409 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); 2915 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
2410 ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH; 2916 ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
@@ -2420,14 +2926,7 @@ static void r8152b_init(struct r8152 *tp)
2420 r8152b_enable_eee(tp); 2926 r8152b_enable_eee(tp);
2421 r8152b_enable_aldps(tp); 2927 r8152b_enable_aldps(tp);
2422 r8152b_enable_fc(tp); 2928 r8152b_enable_fc(tp);
2423 2929 rtl_tally_reset(tp);
2424 r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
2425 BMCR_ANRESTART);
2426 for (i = 0; i < 100; i++) {
2427 udelay(100);
2428 if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
2429 break;
2430 }
2431 2930
2432 /* enable rx aggregation */ 2931 /* enable rx aggregation */
2433 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); 2932 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
@@ -2440,7 +2939,7 @@ static void r8153_init(struct r8152 *tp)
2440 u32 ocp_data; 2939 u32 ocp_data;
2441 int i; 2940 int i;
2442 2941
2443 r8153_u1u2en(tp, 0); 2942 r8153_u1u2en(tp, false);
2444 2943
2445 for (i = 0; i < 500; i++) { 2944 for (i = 0; i < 500; i++) {
2446 if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & 2945 if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
@@ -2456,14 +2955,12 @@ static void r8153_init(struct r8152 *tp)
2456 msleep(20); 2955 msleep(20);
2457 } 2956 }
2458 2957
2459 r8153_u2p3en(tp, 0); 2958 r8153_u2p3en(tp, false);
2460 2959
2461 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL); 2960 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
2462 ocp_data &= ~TIMER11_EN; 2961 ocp_data &= ~TIMER11_EN;
2463 ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data); 2962 ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
2464 2963
2465 r8153_clear_bp(tp);
2466
2467 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); 2964 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
2468 ocp_data &= ~LED_MODE_MASK; 2965 ocp_data &= ~LED_MODE_MASK;
2469 ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); 2966 ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
@@ -2481,10 +2978,8 @@ static void r8153_init(struct r8152 *tp)
2481 ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE; 2978 ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
2482 ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data); 2979 ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
2483 2980
2484 r8153_power_cut_en(tp, 0); 2981 r8153_power_cut_en(tp, false);
2485 r8153_u1u2en(tp, 1); 2982 r8153_u1u2en(tp, true);
2486
2487 r8153_first_init(tp);
2488 2983
2489 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO); 2984 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
2490 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO); 2985 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
@@ -2499,26 +2994,31 @@ static void r8153_init(struct r8152 *tp)
2499 r8153_enable_eee(tp); 2994 r8153_enable_eee(tp);
2500 r8153_enable_aldps(tp); 2995 r8153_enable_aldps(tp);
2501 r8152b_enable_fc(tp); 2996 r8152b_enable_fc(tp);
2502 2997 rtl_tally_reset(tp);
2503 r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
2504 BMCR_ANRESTART);
2505} 2998}
2506 2999
2507static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3000static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
2508{ 3001{
2509 struct r8152 *tp = usb_get_intfdata(intf); 3002 struct r8152 *tp = usb_get_intfdata(intf);
2510 3003
2511 netif_device_detach(tp->netdev); 3004 if (PMSG_IS_AUTO(message))
3005 set_bit(SELECTIVE_SUSPEND, &tp->flags);
3006 else
3007 netif_device_detach(tp->netdev);
2512 3008
2513 if (netif_running(tp->netdev)) { 3009 if (netif_running(tp->netdev)) {
2514 clear_bit(WORK_ENABLE, &tp->flags); 3010 clear_bit(WORK_ENABLE, &tp->flags);
2515 usb_kill_urb(tp->intr_urb); 3011 usb_kill_urb(tp->intr_urb);
2516 cancel_delayed_work_sync(&tp->schedule); 3012 cancel_delayed_work_sync(&tp->schedule);
2517 tasklet_disable(&tp->tl); 3013 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3014 rtl_runtime_suspend_enable(tp, true);
3015 } else {
3016 tasklet_disable(&tp->tl);
3017 tp->rtl_ops.down(tp);
3018 tasklet_enable(&tp->tl);
3019 }
2518 } 3020 }
2519 3021
2520 tp->rtl_ops.down(tp);
2521
2522 return 0; 3022 return 0;
2523} 3023}
2524 3024
@@ -2526,22 +3026,77 @@ static int rtl8152_resume(struct usb_interface *intf)
2526{ 3026{
2527 struct r8152 *tp = usb_get_intfdata(intf); 3027 struct r8152 *tp = usb_get_intfdata(intf);
2528 3028
2529 tp->rtl_ops.init(tp); 3029 if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
2530 netif_device_attach(tp->netdev); 3030 tp->rtl_ops.init(tp);
3031 netif_device_attach(tp->netdev);
3032 }
3033
2531 if (netif_running(tp->netdev)) { 3034 if (netif_running(tp->netdev)) {
2532 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3035 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3036 rtl_runtime_suspend_enable(tp, false);
3037 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3038 if (tp->speed & LINK_STATUS)
3039 tp->rtl_ops.disable(tp);
3040 } else {
3041 tp->rtl_ops.up(tp);
3042 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2533 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, 3043 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2534 DUPLEX_FULL); 3044 DUPLEX_FULL);
3045 }
2535 tp->speed = 0; 3046 tp->speed = 0;
2536 netif_carrier_off(tp->netdev); 3047 netif_carrier_off(tp->netdev);
2537 set_bit(WORK_ENABLE, &tp->flags); 3048 set_bit(WORK_ENABLE, &tp->flags);
2538 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3049 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
2539 tasklet_enable(&tp->tl);
2540 } 3050 }
2541 3051
2542 return 0; 3052 return 0;
2543} 3053}
2544 3054
3055static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3056{
3057 struct r8152 *tp = netdev_priv(dev);
3058
3059 if (usb_autopm_get_interface(tp->intf) < 0)
3060 return;
3061
3062 wol->supported = WAKE_ANY;
3063 wol->wolopts = __rtl_get_wol(tp);
3064
3065 usb_autopm_put_interface(tp->intf);
3066}
3067
3068static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3069{
3070 struct r8152 *tp = netdev_priv(dev);
3071 int ret;
3072
3073 ret = usb_autopm_get_interface(tp->intf);
3074 if (ret < 0)
3075 goto out_set_wol;
3076
3077 __rtl_set_wol(tp, wol->wolopts);
3078 tp->saved_wolopts = wol->wolopts & WAKE_ANY;
3079
3080 usb_autopm_put_interface(tp->intf);
3081
3082out_set_wol:
3083 return ret;
3084}
3085
3086static u32 rtl8152_get_msglevel(struct net_device *dev)
3087{
3088 struct r8152 *tp = netdev_priv(dev);
3089
3090 return tp->msg_enable;
3091}
3092
3093static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
3094{
3095 struct r8152 *tp = netdev_priv(dev);
3096
3097 tp->msg_enable = value;
3098}
3099
2545static void rtl8152_get_drvinfo(struct net_device *netdev, 3100static void rtl8152_get_drvinfo(struct net_device *netdev,
2546 struct ethtool_drvinfo *info) 3101 struct ethtool_drvinfo *info)
2547{ 3102{
@@ -2566,8 +3121,76 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2566static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 3121static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2567{ 3122{
2568 struct r8152 *tp = netdev_priv(dev); 3123 struct r8152 *tp = netdev_priv(dev);
3124 int ret;
3125
3126 ret = usb_autopm_get_interface(tp->intf);
3127 if (ret < 0)
3128 goto out;
2569 3129
2570 return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); 3130 ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
3131
3132 usb_autopm_put_interface(tp->intf);
3133
3134out:
3135 return ret;
3136}
3137
3138static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = {
3139 "tx_packets",
3140 "rx_packets",
3141 "tx_errors",
3142 "rx_errors",
3143 "rx_missed",
3144 "align_errors",
3145 "tx_single_collisions",
3146 "tx_multi_collisions",
3147 "rx_unicast",
3148 "rx_broadcast",
3149 "rx_multicast",
3150 "tx_aborted",
3151 "tx_underrun",
3152};
3153
3154static int rtl8152_get_sset_count(struct net_device *dev, int sset)
3155{
3156 switch (sset) {
3157 case ETH_SS_STATS:
3158 return ARRAY_SIZE(rtl8152_gstrings);
3159 default:
3160 return -EOPNOTSUPP;
3161 }
3162}
3163
3164static void rtl8152_get_ethtool_stats(struct net_device *dev,
3165 struct ethtool_stats *stats, u64 *data)
3166{
3167 struct r8152 *tp = netdev_priv(dev);
3168 struct tally_counter tally;
3169
3170 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
3171
3172 data[0] = le64_to_cpu(tally.tx_packets);
3173 data[1] = le64_to_cpu(tally.rx_packets);
3174 data[2] = le64_to_cpu(tally.tx_errors);
3175 data[3] = le32_to_cpu(tally.rx_errors);
3176 data[4] = le16_to_cpu(tally.rx_missed);
3177 data[5] = le16_to_cpu(tally.align_errors);
3178 data[6] = le32_to_cpu(tally.tx_one_collision);
3179 data[7] = le32_to_cpu(tally.tx_multi_collision);
3180 data[8] = le64_to_cpu(tally.rx_unicast);
3181 data[9] = le64_to_cpu(tally.rx_broadcast);
3182 data[10] = le32_to_cpu(tally.rx_multicast);
3183 data[11] = le16_to_cpu(tally.tx_aborted);
3184 data[12] = le16_to_cpu(tally.tx_underun);
3185}
3186
3187static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3188{
3189 switch (stringset) {
3190 case ETH_SS_STATS:
3191 memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
3192 break;
3193 }
2571} 3194}
2572 3195
2573static struct ethtool_ops ops = { 3196static struct ethtool_ops ops = {
@@ -2575,13 +3198,24 @@ static struct ethtool_ops ops = {
2575 .get_settings = rtl8152_get_settings, 3198 .get_settings = rtl8152_get_settings,
2576 .set_settings = rtl8152_set_settings, 3199 .set_settings = rtl8152_set_settings,
2577 .get_link = ethtool_op_get_link, 3200 .get_link = ethtool_op_get_link,
3201 .get_msglevel = rtl8152_get_msglevel,
3202 .set_msglevel = rtl8152_set_msglevel,
3203 .get_wol = rtl8152_get_wol,
3204 .set_wol = rtl8152_set_wol,
3205 .get_strings = rtl8152_get_strings,
3206 .get_sset_count = rtl8152_get_sset_count,
3207 .get_ethtool_stats = rtl8152_get_ethtool_stats,
2578}; 3208};
2579 3209
2580static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 3210static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2581{ 3211{
2582 struct r8152 *tp = netdev_priv(netdev); 3212 struct r8152 *tp = netdev_priv(netdev);
2583 struct mii_ioctl_data *data = if_mii(rq); 3213 struct mii_ioctl_data *data = if_mii(rq);
2584 int res = 0; 3214 int res;
3215
3216 res = usb_autopm_get_interface(tp->intf);
3217 if (res < 0)
3218 goto out;
2585 3219
2586 switch (cmd) { 3220 switch (cmd) {
2587 case SIOCGMIIPHY: 3221 case SIOCGMIIPHY:
@@ -2604,6 +3238,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2604 res = -EOPNOTSUPP; 3238 res = -EOPNOTSUPP;
2605 } 3239 }
2606 3240
3241 usb_autopm_put_interface(tp->intf);
3242
3243out:
2607 return res; 3244 return res;
2608} 3245}
2609 3246
@@ -2656,22 +3293,13 @@ static void r8152b_get_version(struct r8152 *tp)
2656 3293
2657static void rtl8152_unload(struct r8152 *tp) 3294static void rtl8152_unload(struct r8152 *tp)
2658{ 3295{
2659 u32 ocp_data; 3296 if (tp->version != RTL_VER_01)
2660 3297 r8152_power_cut_en(tp, true);
2661 if (tp->version != RTL_VER_01) {
2662 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
2663 ocp_data |= POWER_CUT;
2664 ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
2665 }
2666
2667 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
2668 ocp_data &= ~RESUME_INDICATE;
2669 ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
2670} 3298}
2671 3299
2672static void rtl8153_unload(struct r8152 *tp) 3300static void rtl8153_unload(struct r8152 *tp)
2673{ 3301{
2674 r8153_power_cut_en(tp, 1); 3302 r8153_power_cut_en(tp, true);
2675} 3303}
2676 3304
2677static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) 3305static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -2686,6 +3314,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
2686 ops->init = r8152b_init; 3314 ops->init = r8152b_init;
2687 ops->enable = rtl8152_enable; 3315 ops->enable = rtl8152_enable;
2688 ops->disable = rtl8152_disable; 3316 ops->disable = rtl8152_disable;
3317 ops->up = r8152b_exit_oob;
2689 ops->down = rtl8152_down; 3318 ops->down = rtl8152_down;
2690 ops->unload = rtl8152_unload; 3319 ops->unload = rtl8152_unload;
2691 ret = 0; 3320 ret = 0;
@@ -2694,6 +3323,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
2694 ops->init = r8153_init; 3323 ops->init = r8153_init;
2695 ops->enable = rtl8153_enable; 3324 ops->enable = rtl8153_enable;
2696 ops->disable = rtl8152_disable; 3325 ops->disable = rtl8152_disable;
3326 ops->up = r8153_first_init;
2697 ops->down = rtl8153_down; 3327 ops->down = rtl8153_down;
2698 ops->unload = rtl8153_unload; 3328 ops->unload = rtl8153_unload;
2699 ret = 0; 3329 ret = 0;
@@ -2709,6 +3339,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
2709 ops->init = r8153_init; 3339 ops->init = r8153_init;
2710 ops->enable = rtl8153_enable; 3340 ops->enable = rtl8153_enable;
2711 ops->disable = rtl8152_disable; 3341 ops->disable = rtl8152_disable;
3342 ops->up = r8153_first_init;
2712 ops->down = rtl8153_down; 3343 ops->down = rtl8153_down;
2713 ops->unload = rtl8153_unload; 3344 ops->unload = rtl8153_unload;
2714 ret = 0; 3345 ret = 0;
@@ -2766,9 +3397,15 @@ static int rtl8152_probe(struct usb_interface *intf,
2766 netdev->netdev_ops = &rtl8152_netdev_ops; 3397 netdev->netdev_ops = &rtl8152_netdev_ops;
2767 netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; 3398 netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
2768 3399
2769 netdev->features |= NETIF_F_IP_CSUM; 3400 netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
2770 netdev->hw_features = NETIF_F_IP_CSUM; 3401 NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM |
3402 NETIF_F_TSO6;
3403 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
3404 NETIF_F_TSO | NETIF_F_FRAGLIST |
3405 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
3406
2771 SET_ETHTOOL_OPS(netdev, &ops); 3407 SET_ETHTOOL_OPS(netdev, &ops);
3408 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
2772 3409
2773 tp->mii.dev = netdev; 3410 tp->mii.dev = netdev;
2774 tp->mii.mdio_read = read_mii_word; 3411 tp->mii.mdio_read = read_mii_word;
@@ -2778,14 +3415,12 @@ static int rtl8152_probe(struct usb_interface *intf,
2778 tp->mii.phy_id = R8152_PHY_ID; 3415 tp->mii.phy_id = R8152_PHY_ID;
2779 tp->mii.supports_gmii = 0; 3416 tp->mii.supports_gmii = 0;
2780 3417
3418 intf->needs_remote_wakeup = 1;
3419
2781 r8152b_get_version(tp); 3420 r8152b_get_version(tp);
2782 tp->rtl_ops.init(tp); 3421 tp->rtl_ops.init(tp);
2783 set_ethernet_addr(tp); 3422 set_ethernet_addr(tp);
2784 3423
2785 ret = alloc_all_mem(tp);
2786 if (ret)
2787 goto out;
2788
2789 usb_set_intfdata(intf, tp); 3424 usb_set_intfdata(intf, tp);
2790 3425
2791 ret = register_netdev(netdev); 3426 ret = register_netdev(netdev);
@@ -2794,6 +3429,12 @@ static int rtl8152_probe(struct usb_interface *intf,
2794 goto out1; 3429 goto out1;
2795 } 3430 }
2796 3431
3432 tp->saved_wolopts = __rtl_get_wol(tp);
3433 if (tp->saved_wolopts)
3434 device_set_wakeup_enable(&udev->dev, true);
3435 else
3436 device_set_wakeup_enable(&udev->dev, false);
3437
2797 netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); 3438 netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
2798 3439
2799 return 0; 3440 return 0;
@@ -2815,7 +3456,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
2815 tasklet_kill(&tp->tl); 3456 tasklet_kill(&tp->tl);
2816 unregister_netdev(tp->netdev); 3457 unregister_netdev(tp->netdev);
2817 tp->rtl_ops.unload(tp); 3458 tp->rtl_ops.unload(tp);
2818 free_all_mem(tp);
2819 free_netdev(tp->netdev); 3459 free_netdev(tp->netdev);
2820 } 3460 }
2821} 3461}
@@ -2838,6 +3478,8 @@ static struct usb_driver rtl8152_driver = {
2838 .suspend = rtl8152_suspend, 3478 .suspend = rtl8152_suspend,
2839 .resume = rtl8152_resume, 3479 .resume = rtl8152_resume,
2840 .reset_resume = rtl8152_resume, 3480 .reset_resume = rtl8152_resume,
3481 .supports_autosuspend = 1,
3482 .disable_hub_initiated_lpm = 1,
2841}; 3483};
2842 3484
2843module_usb_driver(rtl8152_driver); 3485module_usb_driver(rtl8152_driver);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c0e7c64765ab..b4a10bcb66a0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -14,6 +14,7 @@
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/u64_stats_sync.h> 15#include <linux/u64_stats_sync.h>
16 16
17#include <net/rtnetlink.h>
17#include <net/dst.h> 18#include <net/dst.h>
18#include <net/xfrm.h> 19#include <net/xfrm.h>
19#include <linux/veth.h> 20#include <linux/veth.h>
@@ -155,10 +156,10 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
155 unsigned int start; 156 unsigned int start;
156 157
157 do { 158 do {
158 start = u64_stats_fetch_begin_bh(&stats->syncp); 159 start = u64_stats_fetch_begin_irq(&stats->syncp);
159 packets = stats->packets; 160 packets = stats->packets;
160 bytes = stats->bytes; 161 bytes = stats->bytes;
161 } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); 162 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
162 result->packets += packets; 163 result->packets += packets;
163 result->bytes += bytes; 164 result->bytes += bytes;
164 } 165 }
@@ -235,18 +236,9 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
235 236
236static int veth_dev_init(struct net_device *dev) 237static int veth_dev_init(struct net_device *dev)
237{ 238{
238 int i; 239 dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
239
240 dev->vstats = alloc_percpu(struct pcpu_vstats);
241 if (!dev->vstats) 240 if (!dev->vstats)
242 return -ENOMEM; 241 return -ENOMEM;
243
244 for_each_possible_cpu(i) {
245 struct pcpu_vstats *veth_stats;
246 veth_stats = per_cpu_ptr(dev->vstats, i);
247 u64_stats_init(&veth_stats->syncp);
248 }
249
250 return 0; 242 return 0;
251} 243}
252 244
@@ -336,10 +328,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
336 328
337 nla_peer = data[VETH_INFO_PEER]; 329 nla_peer = data[VETH_INFO_PEER];
338 ifmp = nla_data(nla_peer); 330 ifmp = nla_data(nla_peer);
339 err = nla_parse(peer_tb, IFLA_MAX, 331 err = rtnl_nla_parse_ifla(peer_tb,
340 nla_data(nla_peer) + sizeof(struct ifinfomsg), 332 nla_data(nla_peer) + sizeof(struct ifinfomsg),
341 nla_len(nla_peer) - sizeof(struct ifinfomsg), 333 nla_len(nla_peer) - sizeof(struct ifinfomsg));
342 ifla_policy);
343 if (err < 0) 334 if (err < 0)
344 return err; 335 return err;
345 336
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 470b01f3e7b4..7b687469199b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -882,7 +882,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
882 dev_warn(&dev->dev, 882 dev_warn(&dev->dev,
883 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); 883 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
884 dev->stats.tx_dropped++; 884 dev->stats.tx_dropped++;
885 kfree_skb(skb); 885 dev_kfree_skb_any(skb);
886 return NETDEV_TX_OK; 886 return NETDEV_TX_OK;
887 } 887 }
888 virtqueue_kick(sq->vq); 888 virtqueue_kick(sq->vq);
@@ -1000,16 +1000,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
1000 u64 tpackets, tbytes, rpackets, rbytes; 1000 u64 tpackets, tbytes, rpackets, rbytes;
1001 1001
1002 do { 1002 do {
1003 start = u64_stats_fetch_begin_bh(&stats->tx_syncp); 1003 start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
1004 tpackets = stats->tx_packets; 1004 tpackets = stats->tx_packets;
1005 tbytes = stats->tx_bytes; 1005 tbytes = stats->tx_bytes;
1006 } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); 1006 } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
1007 1007
1008 do { 1008 do {
1009 start = u64_stats_fetch_begin_bh(&stats->rx_syncp); 1009 start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
1010 rpackets = stats->rx_packets; 1010 rpackets = stats->rx_packets;
1011 rbytes = stats->rx_bytes; 1011 rbytes = stats->rx_bytes;
1012 } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 1012 } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
1013 1013
1014 tot->rx_packets += rpackets; 1014 tot->rx_packets += rpackets;
1015 tot->tx_packets += tpackets; 1015 tot->tx_packets += tpackets;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0fa3b44f7342..97394345e5dd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1078,7 +1078,7 @@ unlock_drop_pkt:
1078 spin_unlock_irqrestore(&tq->tx_lock, flags); 1078 spin_unlock_irqrestore(&tq->tx_lock, flags);
1079drop_pkt: 1079drop_pkt:
1080 tq->stats.drop_total++; 1080 tq->stats.drop_total++;
1081 dev_kfree_skb(skb); 1081 dev_kfree_skb_any(skb);
1082 return NETDEV_TX_OK; 1082 return NETDEV_TX_OK;
1083} 1083}
1084 1084
@@ -2738,47 +2738,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2738/* 2738/*
2739 * Enable MSIx vectors. 2739 * Enable MSIx vectors.
2740 * Returns : 2740 * Returns :
2741 * 0 on successful enabling of required vectors,
2742 * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required 2741 * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
2743 * could be enabled. 2742 * were enabled.
2744 * number of vectors which can be enabled otherwise (this number is smaller 2743 * number of vectors which were enabled otherwise (this number is greater
2745 * than VMXNET3_LINUX_MIN_MSIX_VECT) 2744 * than VMXNET3_LINUX_MIN_MSIX_VECT)
2746 */ 2745 */
2747 2746
2748static int 2747static int
2749vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, 2748vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
2750 int vectors) 2749{
2751{ 2750 int ret = pci_enable_msix_range(adapter->pdev,
2752 int err = 0, vector_threshold; 2751 adapter->intr.msix_entries, nvec, nvec);
2753 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT; 2752
2754 2753 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
2755 while (vectors >= vector_threshold) { 2754 dev_err(&adapter->netdev->dev,
2756 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2755 "Failed to enable %d MSI-X, trying %d\n",
2757 vectors); 2756 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
2758 if (!err) { 2757
2759 adapter->intr.num_intrs = vectors; 2758 ret = pci_enable_msix_range(adapter->pdev,
2760 return 0; 2759 adapter->intr.msix_entries,
2761 } else if (err < 0) { 2760 VMXNET3_LINUX_MIN_MSIX_VECT,
2762 dev_err(&adapter->netdev->dev, 2761 VMXNET3_LINUX_MIN_MSIX_VECT);
2763 "Failed to enable MSI-X, error: %d\n", err);
2764 vectors = 0;
2765 } else if (err < vector_threshold) {
2766 break;
2767 } else {
2768 /* If fails to enable required number of MSI-x vectors
2769 * try enabling minimum number of vectors required.
2770 */
2771 dev_err(&adapter->netdev->dev,
2772 "Failed to enable %d MSI-X, trying %d instead\n",
2773 vectors, vector_threshold);
2774 vectors = vector_threshold;
2775 }
2776 } 2762 }
2777 2763
2778 dev_info(&adapter->pdev->dev, 2764 if (ret < 0) {
2779 "Number of MSI-X interrupts which can be allocated " 2765 dev_err(&adapter->netdev->dev,
2780 "is lower than min threshold required.\n"); 2766 "Failed to enable MSI-X, error: %d\n", ret);
2781 return err; 2767 }
2768
2769 return ret;
2782} 2770}
2783 2771
2784 2772
@@ -2805,56 +2793,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2805 2793
2806#ifdef CONFIG_PCI_MSI 2794#ifdef CONFIG_PCI_MSI
2807 if (adapter->intr.type == VMXNET3_IT_MSIX) { 2795 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2808 int vector, err = 0; 2796 int i, nvec;
2809 2797
2810 adapter->intr.num_intrs = (adapter->share_intr == 2798 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
2811 VMXNET3_INTR_TXSHARE) ? 1 : 2799 1 : adapter->num_tx_queues;
2812 adapter->num_tx_queues; 2800 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
2813 adapter->intr.num_intrs += (adapter->share_intr == 2801 0 : adapter->num_rx_queues;
2814 VMXNET3_INTR_BUDDYSHARE) ? 0 : 2802 nvec += 1; /* for link event */
2815 adapter->num_rx_queues; 2803 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
2816 adapter->intr.num_intrs += 1; /* for link event */ 2804 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
2817 2805
2818 adapter->intr.num_intrs = (adapter->intr.num_intrs > 2806 for (i = 0; i < nvec; i++)
2819 VMXNET3_LINUX_MIN_MSIX_VECT 2807 adapter->intr.msix_entries[i].entry = i;
2820 ? adapter->intr.num_intrs : 2808
2821 VMXNET3_LINUX_MIN_MSIX_VECT); 2809 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
2822 2810 if (nvec < 0)
2823 for (vector = 0; vector < adapter->intr.num_intrs; vector++) 2811 goto msix_err;
2824 adapter->intr.msix_entries[vector].entry = vector; 2812
2825
2826 err = vmxnet3_acquire_msix_vectors(adapter,
2827 adapter->intr.num_intrs);
2828 /* If we cannot allocate one MSIx vector per queue 2813 /* If we cannot allocate one MSIx vector per queue
2829 * then limit the number of rx queues to 1 2814 * then limit the number of rx queues to 1
2830 */ 2815 */
2831 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { 2816 if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
2832 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 2817 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2833 || adapter->num_rx_queues != 1) { 2818 || adapter->num_rx_queues != 1) {
2834 adapter->share_intr = VMXNET3_INTR_TXSHARE; 2819 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2835 netdev_err(adapter->netdev, 2820 netdev_err(adapter->netdev,
2836 "Number of rx queues : 1\n"); 2821 "Number of rx queues : 1\n");
2837 adapter->num_rx_queues = 1; 2822 adapter->num_rx_queues = 1;
2838 adapter->intr.num_intrs =
2839 VMXNET3_LINUX_MIN_MSIX_VECT;
2840 } 2823 }
2841 return;
2842 } 2824 }
2843 if (!err)
2844 return;
2845 2825
2826 adapter->intr.num_intrs = nvec;
2827 return;
2828
2829msix_err:
2846 /* If we cannot allocate MSIx vectors use only one rx queue */ 2830 /* If we cannot allocate MSIx vectors use only one rx queue */
2847 dev_info(&adapter->pdev->dev, 2831 dev_info(&adapter->pdev->dev,
2848 "Failed to enable MSI-X, error %d. " 2832 "Failed to enable MSI-X, error %d. "
2849 "Limiting #rx queues to 1, try MSI.\n", err); 2833 "Limiting #rx queues to 1, try MSI.\n", nvec);
2850 2834
2851 adapter->intr.type = VMXNET3_IT_MSI; 2835 adapter->intr.type = VMXNET3_IT_MSI;
2852 } 2836 }
2853 2837
2854 if (adapter->intr.type == VMXNET3_IT_MSI) { 2838 if (adapter->intr.type == VMXNET3_IT_MSI) {
2855 int err; 2839 if (!pci_enable_msi(adapter->pdev)) {
2856 err = pci_enable_msi(adapter->pdev);
2857 if (!err) {
2858 adapter->num_rx_queues = 1; 2840 adapter->num_rx_queues = 1;
2859 adapter->intr.num_intrs = 1; 2841 adapter->intr.num_intrs = 1;
2860 return; 2842 return;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1236812c7be6..0d862a5077ab 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1132,7 +1132,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1132{ 1132{
1133 struct vxlan_sock *vs; 1133 struct vxlan_sock *vs;
1134 struct vxlanhdr *vxh; 1134 struct vxlanhdr *vxh;
1135 __be16 port;
1136 1135
1137 /* Need Vxlan and inner Ethernet header to be present */ 1136 /* Need Vxlan and inner Ethernet header to be present */
1138 if (!pskb_may_pull(skb, VXLAN_HLEN)) 1137 if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1150,8 +1149,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1150 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) 1149 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1151 goto drop; 1150 goto drop;
1152 1151
1153 port = inet_sk(sk)->inet_sport;
1154
1155 vs = rcu_dereference_sk_user_data(sk); 1152 vs = rcu_dereference_sk_user_data(sk);
1156 if (!vs) 1153 if (!vs)
1157 goto drop; 1154 goto drop;
@@ -2080,19 +2077,11 @@ static int vxlan_init(struct net_device *dev)
2080 struct vxlan_dev *vxlan = netdev_priv(dev); 2077 struct vxlan_dev *vxlan = netdev_priv(dev);
2081 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 2078 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2082 struct vxlan_sock *vs; 2079 struct vxlan_sock *vs;
2083 int i;
2084 2080
2085 dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 2081 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2086 if (!dev->tstats) 2082 if (!dev->tstats)
2087 return -ENOMEM; 2083 return -ENOMEM;
2088 2084
2089 for_each_possible_cpu(i) {
2090 struct pcpu_sw_netstats *vxlan_stats;
2091 vxlan_stats = per_cpu_ptr(dev->tstats, i);
2092 u64_stats_init(&vxlan_stats->syncp);
2093 }
2094
2095
2096 spin_lock(&vn->sock_lock); 2085 spin_lock(&vn->sock_lock);
2097 vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); 2086 vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
2098 if (vs) { 2087 if (vs) {
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 48896138418f..a9970f1af976 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -374,8 +374,7 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
374 374
375 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); 375 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
376 376
377 if (skb_header_cloned(skb) && 377 if (skb_cow_head(skb, 0))
378 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
379 goto drop; 378 goto drop;
380 379
381 if (i2400m->state == I2400M_SS_IDLE) 380 if (i2400m->state == I2400M_SS_IDLE)
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 200020eb3005..b2137e8f7ca6 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -53,7 +53,7 @@ config LIBERTAS_THINFIRM_USB
53 53
54config AIRO 54config AIRO
55 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 55 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
56 depends on ISA_DMA_API && (PCI || BROKEN) 56 depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
57 select WIRELESS_EXT 57 select WIRELESS_EXT
58 select CRYPTO 58 select CRYPTO
59 select WEXT_SPY 59 select WEXT_SPY
@@ -73,7 +73,7 @@ config AIRO
73 73
74config ATMEL 74config ATMEL
75 tristate "Atmel at76c50x chipset 802.11b support" 75 tristate "Atmel at76c50x chipset 802.11b support"
76 depends on (PCI || PCMCIA) 76 depends on CFG80211 && (PCI || PCMCIA)
77 select WIRELESS_EXT 77 select WIRELESS_EXT
78 select WEXT_PRIV 78 select WEXT_PRIV
79 select FW_LOADER 79 select FW_LOADER
@@ -116,7 +116,7 @@ config AT76C50X_USB
116 116
117config AIRO_CS 117config AIRO_CS
118 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 118 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
119 depends on PCMCIA && (BROKEN || !M32R) 119 depends on CFG80211 && PCMCIA && (BROKEN || !M32R)
120 select WIRELESS_EXT 120 select WIRELESS_EXT
121 select WEXT_SPY 121 select WEXT_SPY
122 select WEXT_PRIV 122 select WEXT_PRIV
@@ -138,7 +138,7 @@ config AIRO_CS
138 138
139config PCMCIA_WL3501 139config PCMCIA_WL3501
140 tristate "Planet WL3501 PCMCIA cards" 140 tristate "Planet WL3501 PCMCIA cards"
141 depends on PCMCIA 141 depends on CFG80211 && PCMCIA
142 select WIRELESS_EXT 142 select WIRELESS_EXT
143 select WEXT_SPY 143 select WEXT_SPY
144 help 144 help
@@ -168,7 +168,7 @@ config PRISM54
168 168
169config USB_ZD1201 169config USB_ZD1201
170 tristate "USB ZD1201 based Wireless device support" 170 tristate "USB ZD1201 based Wireless device support"
171 depends on USB 171 depends on CFG80211 && USB
172 select WIRELESS_EXT 172 select WIRELESS_EXT
173 select WEXT_PRIV 173 select WEXT_PRIV
174 select FW_LOADER 174 select FW_LOADER
@@ -281,5 +281,6 @@ source "drivers/net/wireless/ti/Kconfig"
281source "drivers/net/wireless/zd1211rw/Kconfig" 281source "drivers/net/wireless/zd1211rw/Kconfig"
282source "drivers/net/wireless/mwifiex/Kconfig" 282source "drivers/net/wireless/mwifiex/Kconfig"
283source "drivers/net/wireless/cw1200/Kconfig" 283source "drivers/net/wireless/cw1200/Kconfig"
284source "drivers/net/wireless/rsi/Kconfig"
284 285
285endif # WLAN 286endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0fab227025be..0c8891686718 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -59,3 +59,4 @@ obj-$(CONFIG_BRCMFMAC) += brcm80211/
59obj-$(CONFIG_BRCMSMAC) += brcm80211/ 59obj-$(CONFIG_BRCMSMAC) += brcm80211/
60 60
61obj-$(CONFIG_CW1200) += cw1200/ 61obj-$(CONFIG_CW1200) += cw1200/
62obj-$(CONFIG_RSI_91X) += rsi/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index edf4b57c4aaa..64747d457bb3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -36,7 +36,7 @@
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/scatterlist.h> 37#include <linux/scatterlist.h>
38#include <linux/crypto.h> 38#include <linux/crypto.h>
39#include <asm/io.h> 39#include <linux/io.h>
40#include <asm/unaligned.h> 40#include <asm/unaligned.h>
41 41
42#include <linux/netdevice.h> 42#include <linux/netdevice.h>
@@ -45,11 +45,11 @@
45#include <linux/if_arp.h> 45#include <linux/if_arp.h>
46#include <linux/ioport.h> 46#include <linux/ioport.h>
47#include <linux/pci.h> 47#include <linux/pci.h>
48#include <asm/uaccess.h> 48#include <linux/uaccess.h>
49#include <linux/kthread.h> 49#include <linux/kthread.h>
50#include <linux/freezer.h> 50#include <linux/freezer.h>
51 51
52#include <linux/ieee80211.h> 52#include <net/cfg80211.h>
53#include <net/iw_handler.h> 53#include <net/iw_handler.h>
54 54
55#include "airo.h" 55#include "airo.h"
@@ -5797,7 +5797,7 @@ static int airo_set_freq(struct net_device *dev,
5797 5797
5798 /* Hack to fall through... */ 5798 /* Hack to fall through... */
5799 fwrq->e = 0; 5799 fwrq->e = 0;
5800 fwrq->m = ieee80211_freq_to_dsss_chan(f); 5800 fwrq->m = ieee80211_frequency_to_channel(f);
5801 } 5801 }
5802 /* Setting by channel number */ 5802 /* Setting by channel number */
5803 if((fwrq->m > 1000) || (fwrq->e > 0)) 5803 if((fwrq->m > 1000) || (fwrq->e > 0))
@@ -5841,7 +5841,8 @@ static int airo_get_freq(struct net_device *dev,
5841 5841
5842 ch = le16_to_cpu(status_rid.channel); 5842 ch = le16_to_cpu(status_rid.channel);
5843 if((ch > 0) && (ch < 15)) { 5843 if((ch > 0) && (ch < 15)) {
5844 fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000; 5844 fwrq->m = 100000 *
5845 ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
5845 fwrq->e = 1; 5846 fwrq->e = 1;
5846 } else { 5847 } else {
5847 fwrq->m = ch; 5848 fwrq->m = ch;
@@ -6898,7 +6899,8 @@ static int airo_get_range(struct net_device *dev,
6898 k = 0; 6899 k = 0;
6899 for(i = 0; i < 14; i++) { 6900 for(i = 0; i < 14; i++) {
6900 range->freq[k].i = i + 1; /* List index */ 6901 range->freq[k].i = i + 1; /* List index */
6901 range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000; 6902 range->freq[k].m = 100000 *
6903 ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
6902 range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */ 6904 range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
6903 } 6905 }
6904 range->num_frequency = k; 6906 range->num_frequency = k;
@@ -7297,7 +7299,8 @@ static inline char *airo_translate_scan(struct net_device *dev,
7297 /* Add frequency */ 7299 /* Add frequency */
7298 iwe.cmd = SIOCGIWFREQ; 7300 iwe.cmd = SIOCGIWFREQ;
7299 iwe.u.freq.m = le16_to_cpu(bss->dsChannel); 7301 iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
7300 iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000; 7302 iwe.u.freq.m = 100000 *
7303 ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
7301 iwe.u.freq.e = 1; 7304 iwe.u.freq.e = 1;
7302 current_ev = iwe_stream_add_event(info, current_ev, end_buf, 7305 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
7303 &iwe, IW_EV_FREQ_LEN); 7306 &iwe, IW_EV_FREQ_LEN);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index b59cfbe0276b..a889fd66fc63 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -56,6 +56,15 @@ enum ath_device_state {
56 ATH_HW_INITIALIZED, 56 ATH_HW_INITIALIZED,
57}; 57};
58 58
59enum ath_op_flags {
60 ATH_OP_INVALID,
61 ATH_OP_BEACONS,
62 ATH_OP_ANI_RUN,
63 ATH_OP_PRIM_STA_VIF,
64 ATH_OP_HW_RESET,
65 ATH_OP_SCANNING,
66};
67
59enum ath_bus_type { 68enum ath_bus_type {
60 ATH_PCI, 69 ATH_PCI,
61 ATH_AHB, 70 ATH_AHB,
@@ -63,7 +72,7 @@ enum ath_bus_type {
63}; 72};
64 73
65struct reg_dmn_pair_mapping { 74struct reg_dmn_pair_mapping {
66 u16 regDmnEnum; 75 u16 reg_domain;
67 u16 reg_5ghz_ctl; 76 u16 reg_5ghz_ctl;
68 u16 reg_2ghz_ctl; 77 u16 reg_2ghz_ctl;
69}; 78};
@@ -130,6 +139,7 @@ struct ath_common {
130 struct ieee80211_hw *hw; 139 struct ieee80211_hw *hw;
131 int debug_mask; 140 int debug_mask;
132 enum ath_device_state state; 141 enum ath_device_state state;
142 unsigned long op_flags;
133 143
134 struct ath_ani ani; 144 struct ath_ani ani;
135 145
@@ -161,6 +171,9 @@ struct ath_common {
161 bool btcoex_enabled; 171 bool btcoex_enabled;
162 bool disable_ani; 172 bool disable_ani;
163 bool bt_ant_diversity; 173 bool bt_ant_diversity;
174
175 int last_rssi;
176 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
164}; 177};
165 178
166struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, 179struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index d44d618b05f9..a79499c82350 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
266 * ath10k_ce_sendlist_send. 266 * ath10k_ce_sendlist_send.
267 * The caller takes responsibility for any needed locking. 267 * The caller takes responsibility for any needed locking.
268 */ 268 */
269static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, 269int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
270 void *per_transfer_context, 270 void *per_transfer_context,
271 u32 buffer, 271 u32 buffer,
272 unsigned int nbytes, 272 unsigned int nbytes,
273 unsigned int transfer_id, 273 unsigned int transfer_id,
274 unsigned int flags) 274 unsigned int flags)
275{ 275{
276 struct ath10k *ar = ce_state->ar; 276 struct ath10k *ar = ce_state->ar;
277 struct ath10k_ce_ring *src_ring = ce_state->src_ring; 277 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
1067 * 1067 *
1068 * For the lack of a better place do the check here. 1068 * For the lack of a better place do the check here.
1069 */ 1069 */
1070 BUILD_BUG_ON(TARGET_NUM_MSDU_DESC > 1070 BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
1071 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); 1071 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1072 BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC > 1072 BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
1073 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); 1073 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1074 1074
1075 ret = ath10k_pci_wake(ar); 1075 ret = ath10k_pci_wake(ar);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 67dbde6a5c74..8eb7f99ed992 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -23,7 +23,7 @@
23 23
24/* Maximum number of Copy Engine's supported */ 24/* Maximum number of Copy Engine's supported */
25#define CE_COUNT_MAX 8 25#define CE_COUNT_MAX 8
26#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048 26#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
27 27
28/* Descriptor rings must be aligned to this boundary */ 28/* Descriptor rings must be aligned to this boundary */
29#define CE_DESC_RING_ALIGN 8 29#define CE_DESC_RING_ALIGN 8
@@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
152 unsigned int transfer_id, 152 unsigned int transfer_id,
153 unsigned int flags); 153 unsigned int flags);
154 154
155int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
156 void *per_transfer_context,
157 u32 buffer,
158 unsigned int nbytes,
159 unsigned int transfer_id,
160 unsigned int flags);
161
155void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, 162void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
156 void (*send_cb)(struct ath10k_ce_pipe *), 163 void (*send_cb)(struct ath10k_ce_pipe *),
157 int disable_interrupts); 164 int disable_interrupts);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 3b59af3bddf4..ebc5fc2ede75 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -55,8 +55,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
55{ 55{
56 ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n"); 56 ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
57 57
58 ar->is_target_paused = true; 58 complete(&ar->target_suspend);
59 wake_up(&ar->event_queue);
60} 59}
61 60
62static int ath10k_init_connect_htc(struct ath10k *ar) 61static int ath10k_init_connect_htc(struct ath10k *ar)
@@ -470,8 +469,12 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
470 if (index == ie_len) 469 if (index == ie_len)
471 break; 470 break;
472 471
473 if (data[index] & (1 << bit)) 472 if (data[index] & (1 << bit)) {
473 ath10k_dbg(ATH10K_DBG_BOOT,
474 "Enabling feature bit: %i\n",
475 i);
474 __set_bit(i, ar->fw_features); 476 __set_bit(i, ar->fw_features);
477 }
475 } 478 }
476 479
477 ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "", 480 ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
@@ -699,6 +702,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
699 init_completion(&ar->scan.started); 702 init_completion(&ar->scan.started);
700 init_completion(&ar->scan.completed); 703 init_completion(&ar->scan.completed);
701 init_completion(&ar->scan.on_channel); 704 init_completion(&ar->scan.on_channel);
705 init_completion(&ar->target_suspend);
702 706
703 init_completion(&ar->install_key_done); 707 init_completion(&ar->install_key_done);
704 init_completion(&ar->vdev_setup_done); 708 init_completion(&ar->vdev_setup_done);
@@ -722,8 +726,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
722 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work); 726 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
723 skb_queue_head_init(&ar->wmi_mgmt_tx_queue); 727 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
724 728
725 init_waitqueue_head(&ar->event_queue);
726
727 INIT_WORK(&ar->restart_work, ath10k_core_restart); 729 INIT_WORK(&ar->restart_work, ath10k_core_restart);
728 730
729 return ar; 731 return ar;
@@ -856,10 +858,34 @@ err:
856} 858}
857EXPORT_SYMBOL(ath10k_core_start); 859EXPORT_SYMBOL(ath10k_core_start);
858 860
861int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
862{
863 int ret;
864
865 reinit_completion(&ar->target_suspend);
866
867 ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
868 if (ret) {
869 ath10k_warn("could not suspend target (%d)\n", ret);
870 return ret;
871 }
872
873 ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
874
875 if (ret == 0) {
876 ath10k_warn("suspend timed out - target pause event never came\n");
877 return -ETIMEDOUT;
878 }
879
880 return 0;
881}
882
859void ath10k_core_stop(struct ath10k *ar) 883void ath10k_core_stop(struct ath10k *ar)
860{ 884{
861 lockdep_assert_held(&ar->conf_mutex); 885 lockdep_assert_held(&ar->conf_mutex);
862 886
887 /* try to suspend target */
888 ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
863 ath10k_debug_stop(ar); 889 ath10k_debug_stop(ar);
864 ath10k_htc_stop(&ar->htc); 890 ath10k_htc_stop(&ar->htc);
865 ath10k_htt_detach(&ar->htt); 891 ath10k_htt_detach(&ar->htt);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index ade1781c7186..0e71979d837c 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -46,21 +46,35 @@
46 46
47#define ATH10K_MAX_NUM_MGMT_PENDING 128 47#define ATH10K_MAX_NUM_MGMT_PENDING 128
48 48
49/* number of failed packets */
50#define ATH10K_KICKOUT_THRESHOLD 50
51
52/*
53 * Use insanely high numbers to make sure that the firmware implementation
54 * won't start, we have the same functionality already in hostapd. Unit
55 * is seconds.
56 */
57#define ATH10K_KEEPALIVE_MIN_IDLE 3747
58#define ATH10K_KEEPALIVE_MAX_IDLE 3895
59#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
60
49struct ath10k; 61struct ath10k;
50 62
51struct ath10k_skb_cb { 63struct ath10k_skb_cb {
52 dma_addr_t paddr; 64 dma_addr_t paddr;
53 bool is_mapped;
54 bool is_aborted;
55 u8 vdev_id; 65 u8 vdev_id;
56 66
57 struct { 67 struct {
58 u8 tid; 68 u8 tid;
59 bool is_offchan; 69 bool is_offchan;
60 70 struct ath10k_htt_txbuf *txbuf;
61 u8 frag_len; 71 u32 txbuf_paddr;
62 u8 pad_len;
63 } __packed htt; 72 } __packed htt;
73
74 struct {
75 bool dtim_zero;
76 bool deliver_cab;
77 } bcn;
64} __packed; 78} __packed;
65 79
66static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) 80static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -70,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
70 return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; 84 return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
71} 85}
72 86
73static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
74{
75 if (ATH10K_SKB_CB(skb)->is_mapped)
76 return -EINVAL;
77
78 ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
79 DMA_TO_DEVICE);
80
81 if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
82 return -EIO;
83
84 ATH10K_SKB_CB(skb)->is_mapped = true;
85 return 0;
86}
87
88static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
89{
90 if (!ATH10K_SKB_CB(skb)->is_mapped)
91 return -EINVAL;
92
93 dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
94 DMA_TO_DEVICE);
95 ATH10K_SKB_CB(skb)->is_mapped = false;
96 return 0;
97}
98
99static inline u32 host_interest_item_address(u32 item_offset) 87static inline u32 host_interest_item_address(u32 item_offset)
100{ 88{
101 return QCA988X_HOST_INTEREST_ADDRESS + item_offset; 89 return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -211,6 +199,18 @@ struct ath10k_peer {
211 struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; 199 struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
212}; 200};
213 201
202struct ath10k_sta {
203 struct ath10k_vif *arvif;
204
205 /* the following are protected by ar->data_lock */
206 u32 changed; /* IEEE80211_RC_* */
207 u32 bw;
208 u32 nss;
209 u32 smps;
210
211 struct work_struct update_wk;
212};
213
214#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) 214#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
215 215
216struct ath10k_vif { 216struct ath10k_vif {
@@ -222,10 +222,17 @@ struct ath10k_vif {
222 u32 beacon_interval; 222 u32 beacon_interval;
223 u32 dtim_period; 223 u32 dtim_period;
224 struct sk_buff *beacon; 224 struct sk_buff *beacon;
225 /* protected by data_lock */
226 bool beacon_sent;
225 227
226 struct ath10k *ar; 228 struct ath10k *ar;
227 struct ieee80211_vif *vif; 229 struct ieee80211_vif *vif;
228 230
231 bool is_started;
232 bool is_up;
233 u32 aid;
234 u8 bssid[ETH_ALEN];
235
229 struct work_struct wep_key_work; 236 struct work_struct wep_key_work;
230 struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; 237 struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
231 u8 def_wep_key_idx; 238 u8 def_wep_key_idx;
@@ -235,7 +242,6 @@ struct ath10k_vif {
235 242
236 union { 243 union {
237 struct { 244 struct {
238 u8 bssid[ETH_ALEN];
239 u32 uapsd; 245 u32 uapsd;
240 } sta; 246 } sta;
241 struct { 247 struct {
@@ -249,13 +255,11 @@ struct ath10k_vif {
249 u32 noa_len; 255 u32 noa_len;
250 u8 *noa_data; 256 u8 *noa_data;
251 } ap; 257 } ap;
252 struct {
253 u8 bssid[ETH_ALEN];
254 } ibss;
255 } u; 258 } u;
256 259
257 u8 fixed_rate; 260 u8 fixed_rate;
258 u8 fixed_nss; 261 u8 fixed_nss;
262 u8 force_sgi;
259}; 263};
260 264
261struct ath10k_vif_iter { 265struct ath10k_vif_iter {
@@ -355,8 +359,7 @@ struct ath10k {
355 const struct ath10k_hif_ops *ops; 359 const struct ath10k_hif_ops *ops;
356 } hif; 360 } hif;
357 361
358 wait_queue_head_t event_queue; 362 struct completion target_suspend;
359 bool is_target_paused;
360 363
361 struct ath10k_bmi bmi; 364 struct ath10k_bmi bmi;
362 struct ath10k_wmi wmi; 365 struct ath10k_wmi wmi;
@@ -412,6 +415,9 @@ struct ath10k {
412 /* valid during scan; needed for mgmt rx during scan */ 415 /* valid during scan; needed for mgmt rx during scan */
413 struct ieee80211_channel *scan_channel; 416 struct ieee80211_channel *scan_channel;
414 417
418 /* current operating channel definition */
419 struct cfg80211_chan_def chandef;
420
415 int free_vdev_map; 421 int free_vdev_map;
416 int monitor_vdev_id; 422 int monitor_vdev_id;
417 bool monitor_enabled; 423 bool monitor_enabled;
@@ -470,6 +476,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
470void ath10k_core_destroy(struct ath10k *ar); 476void ath10k_core_destroy(struct ath10k *ar);
471 477
472int ath10k_core_start(struct ath10k *ar); 478int ath10k_core_start(struct ath10k *ar);
479int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
473void ath10k_core_stop(struct ath10k *ar); 480void ath10k_core_stop(struct ath10k *ar);
474int ath10k_core_register(struct ath10k *ar, u32 chip_id); 481int ath10k_core_register(struct ath10k *ar, u32 chip_id);
475void ath10k_core_unregister(struct ath10k *ar); 482void ath10k_core_unregister(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 1773c36c71a0..a5824990bd2a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -92,7 +92,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
92 92
93#ifdef CONFIG_ATH10K_DEBUG 93#ifdef CONFIG_ATH10K_DEBUG
94__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, 94__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
95 const char *fmt, ...); 95 const char *fmt, ...);
96void ath10k_dbg_dump(enum ath10k_debug_mask mask, 96void ath10k_dbg_dump(enum ath10k_debug_mask mask,
97 const char *msg, const char *prefix, 97 const char *msg, const char *prefix,
98 const void *buf, size_t len); 98 const void *buf, size_t len);
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index dcdea68bcc0a..2ac7beacddca 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -21,6 +21,14 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include "core.h" 22#include "core.h"
23 23
24struct ath10k_hif_sg_item {
25 u16 transfer_id;
26 void *transfer_context; /* NULL = tx completion callback not called */
27 void *vaddr; /* for debugging mostly */
28 u32 paddr;
29 u16 len;
30};
31
24struct ath10k_hif_cb { 32struct ath10k_hif_cb {
25 int (*tx_completion)(struct ath10k *ar, 33 int (*tx_completion)(struct ath10k *ar,
26 struct sk_buff *wbuf, 34 struct sk_buff *wbuf,
@@ -31,11 +39,9 @@ struct ath10k_hif_cb {
31}; 39};
32 40
33struct ath10k_hif_ops { 41struct ath10k_hif_ops {
34 /* Send the head of a buffer to HIF for transmission to the target. */ 42 /* send a scatter-gather list to the target */
35 int (*send_head)(struct ath10k *ar, u8 pipe_id, 43 int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
36 unsigned int transfer_id, 44 struct ath10k_hif_sg_item *items, int n_items);
37 unsigned int nbytes,
38 struct sk_buff *buf);
39 45
40 /* 46 /*
41 * API to handle HIF-specific BMI message exchanges, this API is 47 * API to handle HIF-specific BMI message exchanges, this API is
@@ -86,12 +92,11 @@ struct ath10k_hif_ops {
86}; 92};
87 93
88 94
89static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id, 95static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
90 unsigned int transfer_id, 96 struct ath10k_hif_sg_item *items,
91 unsigned int nbytes, 97 int n_items)
92 struct sk_buff *buf)
93{ 98{
94 return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf); 99 return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
95} 100}
96 101
97static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, 102static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edc57ab505c8..7f1bccd3597f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
63static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, 63static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
64 struct sk_buff *skb) 64 struct sk_buff *skb)
65{ 65{
66 ath10k_skb_unmap(htc->ar->dev, skb); 66 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
67
68 dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
67 skb_pull(skb, sizeof(struct ath10k_htc_hdr)); 69 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
68} 70}
69 71
@@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
122 struct sk_buff *skb) 124 struct sk_buff *skb)
123{ 125{
124 struct ath10k_htc_ep *ep = &htc->endpoint[eid]; 126 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
127 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
128 struct ath10k_hif_sg_item sg_item;
129 struct device *dev = htc->ar->dev;
125 int credits = 0; 130 int credits = 0;
126 int ret; 131 int ret;
127 132
@@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
157 162
158 ath10k_htc_prepare_tx_skb(ep, skb); 163 ath10k_htc_prepare_tx_skb(ep, skb);
159 164
160 ret = ath10k_skb_map(htc->ar->dev, skb); 165 skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
166 ret = dma_mapping_error(dev, skb_cb->paddr);
161 if (ret) 167 if (ret)
162 goto err_credits; 168 goto err_credits;
163 169
164 ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid, 170 sg_item.transfer_id = ep->eid;
165 skb->len, skb); 171 sg_item.transfer_context = skb;
172 sg_item.vaddr = skb->data;
173 sg_item.paddr = skb_cb->paddr;
174 sg_item.len = skb->len;
175
176 ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
166 if (ret) 177 if (ret)
167 goto err_unmap; 178 goto err_unmap;
168 179
169 return 0; 180 return 0;
170 181
171err_unmap: 182err_unmap:
172 ath10k_skb_unmap(htc->ar->dev, skb); 183 dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
173err_credits: 184err_credits:
174 if (ep->tx_credit_flow_enabled) { 185 if (ep->tx_credit_flow_enabled) {
175 spin_lock_bh(&htc->tx_lock); 186 spin_lock_bh(&htc->tx_lock);
@@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
191 struct ath10k_htc *htc = &ar->htc; 202 struct ath10k_htc *htc = &ar->htc;
192 struct ath10k_htc_ep *ep = &htc->endpoint[eid]; 203 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
193 204
194 if (!skb) { 205 if (WARN_ON_ONCE(!skb))
195 ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
196 return 0; 206 return 0;
197 }
198 207
199 ath10k_htc_notify_tx_completion(ep, skb); 208 ath10k_htc_notify_tx_completion(ep, skb);
200 /* the skb now belongs to the completion handler */ 209 /* the skb now belongs to the completion handler */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index b93ae355bc08..654867fc1ae7 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,6 +20,7 @@
20 20
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/dmapool.h>
23 24
24#include "htc.h" 25#include "htc.h"
25#include "rx_desc.h" 26#include "rx_desc.h"
@@ -1181,11 +1182,20 @@ struct htt_rx_info {
1181 u32 info1; 1182 u32 info1;
1182 u32 info2; 1183 u32 info2;
1183 } rate; 1184 } rate;
1185
1186 u32 tsf;
1184 bool fcs_err; 1187 bool fcs_err;
1185 bool amsdu_more; 1188 bool amsdu_more;
1186 bool mic_err; 1189 bool mic_err;
1187}; 1190};
1188 1191
1192struct ath10k_htt_txbuf {
1193 struct htt_data_tx_desc_frag frags[2];
1194 struct ath10k_htc_hdr htc_hdr;
1195 struct htt_cmd_hdr cmd_hdr;
1196 struct htt_data_tx_desc cmd_tx;
1197} __packed;
1198
1189struct ath10k_htt { 1199struct ath10k_htt {
1190 struct ath10k *ar; 1200 struct ath10k *ar;
1191 enum ath10k_htc_ep_id eid; 1201 enum ath10k_htc_ep_id eid;
@@ -1267,11 +1277,18 @@ struct ath10k_htt {
1267 struct sk_buff **pending_tx; 1277 struct sk_buff **pending_tx;
1268 unsigned long *used_msdu_ids; /* bitmap */ 1278 unsigned long *used_msdu_ids; /* bitmap */
1269 wait_queue_head_t empty_tx_wq; 1279 wait_queue_head_t empty_tx_wq;
1280 struct dma_pool *tx_pool;
1270 1281
1271 /* set if host-fw communication goes haywire 1282 /* set if host-fw communication goes haywire
1272 * used to avoid further failures */ 1283 * used to avoid further failures */
1273 bool rx_confused; 1284 bool rx_confused;
1274 struct tasklet_struct rx_replenish_task; 1285 struct tasklet_struct rx_replenish_task;
1286
1287 /* This is used to group tx/rx completions separately and process them
1288 * in batches to reduce cache stalls */
1289 struct tasklet_struct txrx_compl_task;
1290 struct sk_buff_head tx_compl_q;
1291 struct sk_buff_head rx_compl_q;
1275}; 1292};
1276 1293
1277#define RX_HTT_HDR_STATUS_LEN 64 1294#define RX_HTT_HDR_STATUS_LEN 64
@@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
1343void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); 1360void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
1344int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); 1361int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
1345int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); 1362int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
1363
1346#endif 1364#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index fe8bd1b59f0e..cdcbe2de95f9 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -43,7 +43,7 @@
43 43
44 44
45static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); 45static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
46 46static void ath10k_htt_txrx_compl_task(unsigned long ptr);
47 47
48static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) 48static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
49{ 49{
@@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
225 ath10k_htt_rx_msdu_buff_replenish(htt); 225 ath10k_htt_rx_msdu_buff_replenish(htt);
226} 226}
227 227
228static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
229{
230 return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
231 htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
232}
233
234void ath10k_htt_rx_detach(struct ath10k_htt *htt) 228void ath10k_htt_rx_detach(struct ath10k_htt *htt)
235{ 229{
236 int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; 230 int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
237 231
238 del_timer_sync(&htt->rx_ring.refill_retry_timer); 232 del_timer_sync(&htt->rx_ring.refill_retry_timer);
239 tasklet_kill(&htt->rx_replenish_task); 233 tasklet_kill(&htt->rx_replenish_task);
234 tasklet_kill(&htt->txrx_compl_task);
235
236 skb_queue_purge(&htt->tx_compl_q);
237 skb_queue_purge(&htt->rx_compl_q);
240 238
241 while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { 239 while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
242 struct sk_buff *skb = 240 struct sk_buff *skb =
@@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
270 int idx; 268 int idx;
271 struct sk_buff *msdu; 269 struct sk_buff *msdu;
272 270
273 spin_lock_bh(&htt->rx_ring.lock); 271 lockdep_assert_held(&htt->rx_ring.lock);
274 272
275 if (ath10k_htt_rx_ring_elems(htt) == 0) 273 if (htt->rx_ring.fill_cnt == 0) {
276 ath10k_warn("htt rx ring is empty!\n"); 274 ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
275 return NULL;
276 }
277 277
278 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 278 idx = htt->rx_ring.sw_rd_idx.msdu_payld;
279 msdu = htt->rx_ring.netbufs_ring[idx]; 279 msdu = htt->rx_ring.netbufs_ring[idx];
@@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
283 htt->rx_ring.sw_rd_idx.msdu_payld = idx; 283 htt->rx_ring.sw_rd_idx.msdu_payld = idx;
284 htt->rx_ring.fill_cnt--; 284 htt->rx_ring.fill_cnt--;
285 285
286 spin_unlock_bh(&htt->rx_ring.lock);
287 return msdu; 286 return msdu;
288} 287}
289 288
@@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
307 struct sk_buff *msdu; 306 struct sk_buff *msdu;
308 struct htt_rx_desc *rx_desc; 307 struct htt_rx_desc *rx_desc;
309 308
310 if (ath10k_htt_rx_ring_elems(htt) == 0) 309 lockdep_assert_held(&htt->rx_ring.lock);
311 ath10k_warn("htt rx ring is empty!\n");
312 310
313 if (htt->rx_confused) { 311 if (htt->rx_confused) {
314 ath10k_warn("htt is confused. refusing rx\n"); 312 ath10k_warn("htt is confused. refusing rx\n");
@@ -324,7 +322,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
324 msdu->len + skb_tailroom(msdu), 322 msdu->len + skb_tailroom(msdu),
325 DMA_FROM_DEVICE); 323 DMA_FROM_DEVICE);
326 324
327 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", 325 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
328 msdu->data, msdu->len + skb_tailroom(msdu)); 326 msdu->data, msdu->len + skb_tailroom(msdu));
329 327
330 rx_desc = (struct htt_rx_desc *)msdu->data; 328 rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
400 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), 398 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
401 RX_MSDU_START_INFO0_MSDU_LENGTH); 399 RX_MSDU_START_INFO0_MSDU_LENGTH);
402 msdu_chained = rx_desc->frag_info.ring2_more_count; 400 msdu_chained = rx_desc->frag_info.ring2_more_count;
401 msdu_chaining = msdu_chained;
403 402
404 if (msdu_len_invalid) 403 if (msdu_len_invalid)
405 msdu_len = 0; 404 msdu_len = 0;
@@ -417,8 +416,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
417 next->len + skb_tailroom(next), 416 next->len + skb_tailroom(next),
418 DMA_FROM_DEVICE); 417 DMA_FROM_DEVICE);
419 418
420 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", 419 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
421 next->data, 420 "htt rx chained: ", next->data,
422 next->len + skb_tailroom(next)); 421 next->len + skb_tailroom(next));
423 422
424 skb_trim(next, 0); 423 skb_trim(next, 0);
@@ -427,13 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
427 426
428 msdu->next = next; 427 msdu->next = next;
429 msdu = next; 428 msdu = next;
430 msdu_chaining = 1;
431 }
432
433 if (msdu_len > 0) {
434 /* This may suggest FW bug? */
435 ath10k_warn("htt rx msdu len not consumed (%d)\n",
436 msdu_len);
437 } 429 }
438 430
439 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & 431 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
@@ -535,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
535 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, 527 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
536 (unsigned long)htt); 528 (unsigned long)htt);
537 529
530 skb_queue_head_init(&htt->tx_compl_q);
531 skb_queue_head_init(&htt->rx_compl_q);
532
533 tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
534 (unsigned long)htt);
535
538 ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", 536 ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
539 htt->rx_ring.size, htt->rx_ring.fill_level); 537 htt->rx_ring.size, htt->rx_ring.fill_level);
540 return 0; 538 return 0;
@@ -638,6 +636,12 @@ struct amsdu_subframe_hdr {
638 __be16 len; 636 __be16 len;
639} __packed; 637} __packed;
640 638
639static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
640{
641 /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
642 return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
643}
644
641static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, 645static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
642 struct htt_rx_info *info) 646 struct htt_rx_info *info)
643{ 647{
@@ -687,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
687 case RX_MSDU_DECAP_NATIVE_WIFI: 691 case RX_MSDU_DECAP_NATIVE_WIFI:
688 /* pull decapped header and copy DA */ 692 /* pull decapped header and copy DA */
689 hdr = (struct ieee80211_hdr *)skb->data; 693 hdr = (struct ieee80211_hdr *)skb->data;
690 hdr_len = ieee80211_hdrlen(hdr->frame_control); 694 hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
691 memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN); 695 memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
692 skb_pull(skb, hdr_len); 696 skb_pull(skb, hdr_len);
693 697
@@ -751,7 +755,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
751 755
752 /* This shouldn't happen. If it does than it may be a FW bug. */ 756 /* This shouldn't happen. If it does than it may be a FW bug. */
753 if (skb->next) { 757 if (skb->next) {
754 ath10k_warn("received chained non A-MSDU frame\n"); 758 ath10k_warn("htt rx received chained non A-MSDU frame\n");
755 ath10k_htt_rx_free_msdu_chain(skb->next); 759 ath10k_htt_rx_free_msdu_chain(skb->next);
756 skb->next = NULL; 760 skb->next = NULL;
757 } 761 }
@@ -774,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
774 case RX_MSDU_DECAP_NATIVE_WIFI: 778 case RX_MSDU_DECAP_NATIVE_WIFI:
775 /* Pull decapped header */ 779 /* Pull decapped header */
776 hdr = (struct ieee80211_hdr *)skb->data; 780 hdr = (struct ieee80211_hdr *)skb->data;
777 hdr_len = ieee80211_hdrlen(hdr->frame_control); 781 hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
778 skb_pull(skb, hdr_len); 782 skb_pull(skb, hdr_len);
779 783
780 /* Push original header */ 784 /* Push original header */
@@ -852,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
852 return false; 856 return false;
853} 857}
854 858
859static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
860{
861 struct htt_rx_desc *rxd;
862 u32 flags;
863
864 rxd = (void *)skb->data - sizeof(*rxd);
865 flags = __le32_to_cpu(rxd->attention.flags);
866
867 if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
868 return true;
869
870 return false;
871}
872
855static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) 873static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
856{ 874{
857 struct htt_rx_desc *rxd; 875 struct htt_rx_desc *rxd;
@@ -883,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
883 return CHECKSUM_UNNECESSARY; 901 return CHECKSUM_UNNECESSARY;
884} 902}
885 903
904static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
905{
906 struct sk_buff *next = msdu_head->next;
907 struct sk_buff *to_free = next;
908 int space;
909 int total_len = 0;
910
911 /* TODO: Might could optimize this by using
912 * skb_try_coalesce or similar method to
913 * decrease copying, or maybe get mac80211 to
914 * provide a way to just receive a list of
915 * skb?
916 */
917
918 msdu_head->next = NULL;
919
920 /* Allocate total length all at once. */
921 while (next) {
922 total_len += next->len;
923 next = next->next;
924 }
925
926 space = total_len - skb_tailroom(msdu_head);
927 if ((space > 0) &&
928 (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
929 /* TODO: bump some rx-oom error stat */
930 /* put it back together so we can free the
931 * whole list at once.
932 */
933 msdu_head->next = to_free;
934 return -1;
935 }
936
937 /* Walk list again, copying contents into
938 * msdu_head
939 */
940 next = to_free;
941 while (next) {
942 skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
943 next->len);
944 next = next->next;
945 }
946
947 /* If here, we have consolidated skb. Free the
948 * fragments and pass the main skb on up the
949 * stack.
950 */
951 ath10k_htt_rx_free_msdu_chain(to_free);
952 return 0;
953}
954
886static void ath10k_htt_rx_handler(struct ath10k_htt *htt, 955static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
887 struct htt_rx_indication *rx) 956 struct htt_rx_indication *rx)
888{ 957{
@@ -894,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
894 u8 *fw_desc; 963 u8 *fw_desc;
895 int i, j; 964 int i, j;
896 965
966 lockdep_assert_held(&htt->rx_ring.lock);
967
897 memset(&info, 0, sizeof(info)); 968 memset(&info, 0, sizeof(info));
898 969
899 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); 970 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
@@ -937,6 +1008,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
937 } 1008 }
938 1009
939 if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { 1010 if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
1011 ath10k_dbg(ATH10K_DBG_HTT,
1012 "htt rx dropping due to decrypt-err\n");
940 ath10k_htt_rx_free_msdu_chain(msdu_head); 1013 ath10k_htt_rx_free_msdu_chain(msdu_head);
941 continue; 1014 continue;
942 } 1015 }
@@ -944,13 +1017,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
944 status = info.status; 1017 status = info.status;
945 1018
946 /* Skip mgmt frames while we handle this in WMI */ 1019 /* Skip mgmt frames while we handle this in WMI */
947 if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) { 1020 if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
1021 ath10k_htt_rx_is_mgmt(msdu_head)) {
1022 ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
948 ath10k_htt_rx_free_msdu_chain(msdu_head); 1023 ath10k_htt_rx_free_msdu_chain(msdu_head);
949 continue; 1024 continue;
950 } 1025 }
951 1026
952 if (status != HTT_RX_IND_MPDU_STATUS_OK && 1027 if (status != HTT_RX_IND_MPDU_STATUS_OK &&
953 status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && 1028 status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
1029 status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
954 !htt->ar->monitor_enabled) { 1030 !htt->ar->monitor_enabled) {
955 ath10k_dbg(ATH10K_DBG_HTT, 1031 ath10k_dbg(ATH10K_DBG_HTT,
956 "htt rx ignoring frame w/ status %d\n", 1032 "htt rx ignoring frame w/ status %d\n",
@@ -960,14 +1036,14 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
960 } 1036 }
961 1037
962 if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { 1038 if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
1039 ath10k_dbg(ATH10K_DBG_HTT,
1040 "htt rx CAC running\n");
963 ath10k_htt_rx_free_msdu_chain(msdu_head); 1041 ath10k_htt_rx_free_msdu_chain(msdu_head);
964 continue; 1042 continue;
965 } 1043 }
966 1044
967 /* FIXME: we do not support chaining yet. 1045 if (msdu_chaining &&
968 * this needs investigation */ 1046 (ath10k_unchain_msdu(msdu_head) < 0)) {
969 if (msdu_chaining) {
970 ath10k_warn("msdu_chaining is true\n");
971 ath10k_htt_rx_free_msdu_chain(msdu_head); 1047 ath10k_htt_rx_free_msdu_chain(msdu_head);
972 continue; 1048 continue;
973 } 1049 }
@@ -975,12 +1051,22 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
975 info.skb = msdu_head; 1051 info.skb = msdu_head;
976 info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); 1052 info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
977 info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head); 1053 info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
1054
1055 if (info.fcs_err)
1056 ath10k_dbg(ATH10K_DBG_HTT,
1057 "htt rx has FCS err\n");
1058
1059 if (info.mic_err)
1060 ath10k_dbg(ATH10K_DBG_HTT,
1061 "htt rx has MIC err\n");
1062
978 info.signal = ATH10K_DEFAULT_NOISE_FLOOR; 1063 info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
979 info.signal += rx->ppdu.combined_rssi; 1064 info.signal += rx->ppdu.combined_rssi;
980 1065
981 info.rate.info0 = rx->ppdu.info0; 1066 info.rate.info0 = rx->ppdu.info0;
982 info.rate.info1 = __le32_to_cpu(rx->ppdu.info1); 1067 info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
983 info.rate.info2 = __le32_to_cpu(rx->ppdu.info2); 1068 info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
1069 info.tsf = __le32_to_cpu(rx->ppdu.tsf);
984 1070
985 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); 1071 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
986 1072
@@ -1014,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1014 1100
1015 msdu_head = NULL; 1101 msdu_head = NULL;
1016 msdu_tail = NULL; 1102 msdu_tail = NULL;
1103
1104 spin_lock_bh(&htt->rx_ring.lock);
1017 msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, 1105 msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
1018 &msdu_head, &msdu_tail); 1106 &msdu_head, &msdu_tail);
1107 spin_unlock_bh(&htt->rx_ring.lock);
1019 1108
1020 ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); 1109 ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
1021 1110
@@ -1095,7 +1184,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1095 1184
1096 skb_trim(info.skb, info.skb->len - trim); 1185 skb_trim(info.skb, info.skb->len - trim);
1097 1186
1098 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ", 1187 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
1099 info.skb->data, info.skb->len); 1188 info.skb->data, info.skb->len);
1100 ath10k_process_rx(htt->ar, &info); 1189 ath10k_process_rx(htt->ar, &info);
1101 1190
@@ -1107,6 +1196,45 @@ end:
1107 } 1196 }
1108} 1197}
1109 1198
1199static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
1200 struct sk_buff *skb)
1201{
1202 struct ath10k_htt *htt = &ar->htt;
1203 struct htt_resp *resp = (struct htt_resp *)skb->data;
1204 struct htt_tx_done tx_done = {};
1205 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1206 __le16 msdu_id;
1207 int i;
1208
1209 lockdep_assert_held(&htt->tx_lock);
1210
1211 switch (status) {
1212 case HTT_DATA_TX_STATUS_NO_ACK:
1213 tx_done.no_ack = true;
1214 break;
1215 case HTT_DATA_TX_STATUS_OK:
1216 break;
1217 case HTT_DATA_TX_STATUS_DISCARD:
1218 case HTT_DATA_TX_STATUS_POSTPONE:
1219 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1220 tx_done.discard = true;
1221 break;
1222 default:
1223 ath10k_warn("unhandled tx completion status %d\n", status);
1224 tx_done.discard = true;
1225 break;
1226 }
1227
1228 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1229 resp->data_tx_completion.num_msdus);
1230
1231 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1232 msdu_id = resp->data_tx_completion.msdus[i];
1233 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1234 ath10k_txrx_tx_unref(htt, &tx_done);
1235 }
1236}
1237
1110void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 1238void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1111{ 1239{
1112 struct ath10k_htt *htt = &ar->htt; 1240 struct ath10k_htt *htt = &ar->htt;
@@ -1116,7 +1244,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1116 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 1244 if (!IS_ALIGNED((unsigned long)skb->data, 4))
1117 ath10k_warn("unaligned htt message, expect trouble\n"); 1245 ath10k_warn("unaligned htt message, expect trouble\n");
1118 1246
1119 ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n", 1247 ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
1120 resp->hdr.msg_type); 1248 resp->hdr.msg_type);
1121 switch (resp->hdr.msg_type) { 1249 switch (resp->hdr.msg_type) {
1122 case HTT_T2H_MSG_TYPE_VERSION_CONF: { 1250 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
@@ -1125,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1125 complete(&htt->target_version_received); 1253 complete(&htt->target_version_received);
1126 break; 1254 break;
1127 } 1255 }
1128 case HTT_T2H_MSG_TYPE_RX_IND: { 1256 case HTT_T2H_MSG_TYPE_RX_IND:
1129 ath10k_htt_rx_handler(htt, &resp->rx_ind); 1257 spin_lock_bh(&htt->rx_ring.lock);
1130 break; 1258 __skb_queue_tail(&htt->rx_compl_q, skb);
1131 } 1259 spin_unlock_bh(&htt->rx_ring.lock);
1260 tasklet_schedule(&htt->txrx_compl_task);
1261 return;
1132 case HTT_T2H_MSG_TYPE_PEER_MAP: { 1262 case HTT_T2H_MSG_TYPE_PEER_MAP: {
1133 struct htt_peer_map_event ev = { 1263 struct htt_peer_map_event ev = {
1134 .vdev_id = resp->peer_map.vdev_id, 1264 .vdev_id = resp->peer_map.vdev_id,
@@ -1163,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1163 break; 1293 break;
1164 } 1294 }
1165 1295
1296 spin_lock_bh(&htt->tx_lock);
1166 ath10k_txrx_tx_unref(htt, &tx_done); 1297 ath10k_txrx_tx_unref(htt, &tx_done);
1298 spin_unlock_bh(&htt->tx_lock);
1167 break; 1299 break;
1168 } 1300 }
1169 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { 1301 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
1170 struct htt_tx_done tx_done = {}; 1302 spin_lock_bh(&htt->tx_lock);
1171 int status = MS(resp->data_tx_completion.flags, 1303 __skb_queue_tail(&htt->tx_compl_q, skb);
1172 HTT_DATA_TX_STATUS); 1304 spin_unlock_bh(&htt->tx_lock);
1173 __le16 msdu_id; 1305 tasklet_schedule(&htt->txrx_compl_task);
1174 int i; 1306 return;
1175
1176 switch (status) {
1177 case HTT_DATA_TX_STATUS_NO_ACK:
1178 tx_done.no_ack = true;
1179 break;
1180 case HTT_DATA_TX_STATUS_OK:
1181 break;
1182 case HTT_DATA_TX_STATUS_DISCARD:
1183 case HTT_DATA_TX_STATUS_POSTPONE:
1184 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1185 tx_done.discard = true;
1186 break;
1187 default:
1188 ath10k_warn("unhandled tx completion status %d\n",
1189 status);
1190 tx_done.discard = true;
1191 break;
1192 }
1193
1194 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1195 resp->data_tx_completion.num_msdus);
1196
1197 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1198 msdu_id = resp->data_tx_completion.msdus[i];
1199 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1200 ath10k_txrx_tx_unref(htt, &tx_done);
1201 }
1202 break;
1203 }
1204 case HTT_T2H_MSG_TYPE_SEC_IND: { 1307 case HTT_T2H_MSG_TYPE_SEC_IND: {
1205 struct ath10k *ar = htt->ar; 1308 struct ath10k *ar = htt->ar;
1206 struct htt_security_indication *ev = &resp->security_indication; 1309 struct htt_security_indication *ev = &resp->security_indication;
@@ -1240,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1240 /* Free the indication buffer */ 1343 /* Free the indication buffer */
1241 dev_kfree_skb_any(skb); 1344 dev_kfree_skb_any(skb);
1242} 1345}
1346
1347static void ath10k_htt_txrx_compl_task(unsigned long ptr)
1348{
1349 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
1350 struct htt_resp *resp;
1351 struct sk_buff *skb;
1352
1353 spin_lock_bh(&htt->tx_lock);
1354 while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
1355 ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
1356 dev_kfree_skb_any(skb);
1357 }
1358 spin_unlock_bh(&htt->tx_lock);
1359
1360 spin_lock_bh(&htt->rx_ring.lock);
1361 while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
1362 resp = (struct htt_resp *)skb->data;
1363 ath10k_htt_rx_handler(htt, &resp->rx_ind);
1364 dev_kfree_skb_any(skb);
1365 }
1366 spin_unlock_bh(&htt->rx_ring.lock);
1367}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index f1d36d2d2723..7a3e2e40dd5c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
109 return -ENOMEM; 109 return -ENOMEM;
110 } 110 }
111 111
112 htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
113 sizeof(struct ath10k_htt_txbuf), 4, 0);
114 if (!htt->tx_pool) {
115 kfree(htt->used_msdu_ids);
116 kfree(htt->pending_tx);
117 return -ENOMEM;
118 }
119
112 return 0; 120 return 0;
113} 121}
114 122
@@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
117 struct htt_tx_done tx_done = {0}; 125 struct htt_tx_done tx_done = {0};
118 int msdu_id; 126 int msdu_id;
119 127
120 /* No locks needed. Called after communication with the device has 128 spin_lock_bh(&htt->tx_lock);
121 * been stopped. */
122
123 for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { 129 for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
124 if (!test_bit(msdu_id, htt->used_msdu_ids)) 130 if (!test_bit(msdu_id, htt->used_msdu_ids))
125 continue; 131 continue;
@@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
132 138
133 ath10k_txrx_tx_unref(htt, &tx_done); 139 ath10k_txrx_tx_unref(htt, &tx_done);
134 } 140 }
141 spin_unlock_bh(&htt->tx_lock);
135} 142}
136 143
137void ath10k_htt_tx_detach(struct ath10k_htt *htt) 144void ath10k_htt_tx_detach(struct ath10k_htt *htt)
@@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
139 ath10k_htt_tx_cleanup_pending(htt); 146 ath10k_htt_tx_cleanup_pending(htt);
140 kfree(htt->pending_tx); 147 kfree(htt->pending_tx);
141 kfree(htt->used_msdu_ids); 148 kfree(htt->used_msdu_ids);
149 dma_pool_destroy(htt->tx_pool);
142 return; 150 return;
143} 151}
144 152
@@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
334 goto err_free_msdu_id; 342 goto err_free_msdu_id;
335 } 343 }
336 344
337 res = ath10k_skb_map(dev, msdu); 345 skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
346 DMA_TO_DEVICE);
347 res = dma_mapping_error(dev, skb_cb->paddr);
338 if (res) 348 if (res)
339 goto err_free_txdesc; 349 goto err_free_txdesc;
340 350
@@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
348 memcpy(cmd->mgmt_tx.hdr, msdu->data, 358 memcpy(cmd->mgmt_tx.hdr, msdu->data,
349 min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); 359 min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
350 360
351 skb_cb->htt.frag_len = 0; 361 skb_cb->htt.txbuf = NULL;
352 skb_cb->htt.pad_len = 0;
353 362
354 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); 363 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
355 if (res) 364 if (res)
@@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
358 return 0; 367 return 0;
359 368
360err_unmap_msdu: 369err_unmap_msdu:
361 ath10k_skb_unmap(dev, msdu); 370 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
362err_free_txdesc: 371err_free_txdesc:
363 dev_kfree_skb_any(txdesc); 372 dev_kfree_skb_any(txdesc);
364err_free_msdu_id: 373err_free_msdu_id:
@@ -375,19 +384,19 @@ err:
375int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) 384int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
376{ 385{
377 struct device *dev = htt->ar->dev; 386 struct device *dev = htt->ar->dev;
378 struct htt_cmd *cmd;
379 struct htt_data_tx_desc_frag *tx_frags;
380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 387 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
381 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); 388 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
382 struct sk_buff *txdesc = NULL; 389 struct ath10k_hif_sg_item sg_items[2];
383 bool use_frags; 390 struct htt_data_tx_desc_frag *frags;
384 u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id; 391 u8 vdev_id = skb_cb->vdev_id;
385 u8 tid; 392 u8 tid = skb_cb->htt.tid;
386 int prefetch_len, desc_len; 393 int prefetch_len;
387 int msdu_id = -1;
388 int res; 394 int res;
389 u8 flags0; 395 u8 flags0 = 0;
390 u16 flags1; 396 u16 msdu_id, flags1 = 0;
397 dma_addr_t paddr;
398 u32 frags_paddr;
399 bool use_frags;
391 400
392 res = ath10k_htt_tx_inc_pending(htt); 401 res = ath10k_htt_tx_inc_pending(htt);
393 if (res) 402 if (res)
@@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
406 prefetch_len = min(htt->prefetch_len, msdu->len); 415 prefetch_len = min(htt->prefetch_len, msdu->len);
407 prefetch_len = roundup(prefetch_len, 4); 416 prefetch_len = roundup(prefetch_len, 4);
408 417
409 desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
410
411 txdesc = ath10k_htc_alloc_skb(desc_len);
412 if (!txdesc) {
413 res = -ENOMEM;
414 goto err_free_msdu_id;
415 }
416
417 /* Since HTT 3.0 there is no separate mgmt tx command. However in case 418 /* Since HTT 3.0 there is no separate mgmt tx command. However in case
418 * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx 419 * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
419 * fragment list host driver specifies directly frame pointer. */ 420 * fragment list host driver specifies directly frame pointer. */
420 use_frags = htt->target_version_major < 3 || 421 use_frags = htt->target_version_major < 3 ||
421 !ieee80211_is_mgmt(hdr->frame_control); 422 !ieee80211_is_mgmt(hdr->frame_control);
422 423
423 if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { 424 skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
424 ath10k_warn("htt alignment check failed. dropping packet.\n"); 425 &paddr);
425 res = -EIO; 426 if (!skb_cb->htt.txbuf)
426 goto err_free_txdesc; 427 goto err_free_msdu_id;
427 } 428 skb_cb->htt.txbuf_paddr = paddr;
428 429
429 if (use_frags) { 430 skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
430 skb_cb->htt.frag_len = sizeof(*tx_frags) * 2; 431 DMA_TO_DEVICE);
431 skb_cb->htt.pad_len = (unsigned long)msdu->data - 432 res = dma_mapping_error(dev, skb_cb->paddr);
432 round_down((unsigned long)msdu->data, 4); 433 if (res)
434 goto err_free_txbuf;
433 435
434 skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); 436 if (likely(use_frags)) {
435 } else { 437 frags = skb_cb->htt.txbuf->frags;
436 skb_cb->htt.frag_len = 0;
437 skb_cb->htt.pad_len = 0;
438 }
439 438
440 res = ath10k_skb_map(dev, msdu); 439 frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
441 if (res) 440 frags[0].len = __cpu_to_le32(msdu->len);
442 goto err_pull_txfrag; 441 frags[1].paddr = 0;
443 442 frags[1].len = 0;
444 if (use_frags) {
445 dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
446 DMA_TO_DEVICE);
447
448 /* tx fragment list must be terminated with zero-entry */
449 tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
450 tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
451 skb_cb->htt.frag_len +
452 skb_cb->htt.pad_len);
453 tx_frags[0].len = __cpu_to_le32(msdu->len -
454 skb_cb->htt.frag_len -
455 skb_cb->htt.pad_len);
456 tx_frags[1].paddr = __cpu_to_le32(0);
457 tx_frags[1].len = __cpu_to_le32(0);
458
459 dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
460 DMA_TO_DEVICE);
461 }
462 443
463 ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n", 444 flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
464 (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); 445 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
465 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
466 msdu->data, msdu->len);
467 446
468 skb_put(txdesc, desc_len); 447 frags_paddr = skb_cb->htt.txbuf_paddr;
469 cmd = (struct htt_cmd *)txdesc->data; 448 } else {
449 flags0 |= SM(ATH10K_HW_TXRX_MGMT,
450 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
470 451
471 tid = ATH10K_SKB_CB(msdu)->htt.tid; 452 frags_paddr = skb_cb->paddr;
453 }
472 454
473 ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); 455 /* Normally all commands go through HTC which manages tx credits for
456 * each endpoint and notifies when tx is completed.
457 *
458 * HTT endpoint is creditless so there's no need to care about HTC
459 * flags. In that case it is trivial to fill the HTC header here.
460 *
461 * MSDU transmission is considered completed upon HTT event. This
462 * implies no relevant resources can be freed until after the event is
463 * received. That's why HTC tx completion handler itself is ignored by
464 * setting NULL to transfer_context for all sg items.
465 *
466 * There is simply no point in pushing HTT TX_FRM through HTC tx path
467 * as it's a waste of resources. By bypassing HTC it is possible to
468 * avoid extra memory allocations, compress data structures and thus
469 * improve performance. */
470
471 skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
472 skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
473 sizeof(skb_cb->htt.txbuf->cmd_hdr) +
474 sizeof(skb_cb->htt.txbuf->cmd_tx) +
475 prefetch_len);
476 skb_cb->htt.txbuf->htc_hdr.flags = 0;
474 477
475 flags0 = 0;
476 if (!ieee80211_has_protected(hdr->frame_control)) 478 if (!ieee80211_has_protected(hdr->frame_control))
477 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; 479 flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
478 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
479 480
480 if (use_frags) 481 flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
481 flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
482 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
483 else
484 flags0 |= SM(ATH10K_HW_TXRX_MGMT,
485 HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
486 482
487 flags1 = 0;
488 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); 483 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
489 flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); 484 flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
490 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; 485 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
491 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; 486 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
492 487
493 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; 488 skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
494 cmd->data_tx.flags0 = flags0; 489 skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
495 cmd->data_tx.flags1 = __cpu_to_le16(flags1); 490 skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
496 cmd->data_tx.len = __cpu_to_le16(msdu->len - 491 skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
497 skb_cb->htt.frag_len - 492 skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
498 skb_cb->htt.pad_len); 493 skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
499 cmd->data_tx.id = __cpu_to_le16(msdu_id); 494 skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
500 cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr); 495
501 cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); 496 ath10k_dbg(ATH10K_DBG_HTT,
502 497 "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
503 memcpy(cmd->data_tx.prefetch, hdr, prefetch_len); 498 flags0, flags1, msdu->len, msdu_id, frags_paddr,
499 (u32)skb_cb->paddr, vdev_id, tid);
500 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
501 msdu->data, msdu->len);
504 502
505 res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); 503 sg_items[0].transfer_id = 0;
504 sg_items[0].transfer_context = NULL;
505 sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
506 sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
507 sizeof(skb_cb->htt.txbuf->frags);
508 sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
509 sizeof(skb_cb->htt.txbuf->cmd_hdr) +
510 sizeof(skb_cb->htt.txbuf->cmd_tx);
511
512 sg_items[1].transfer_id = 0;
513 sg_items[1].transfer_context = NULL;
514 sg_items[1].vaddr = msdu->data;
515 sg_items[1].paddr = skb_cb->paddr;
516 sg_items[1].len = prefetch_len;
517
518 res = ath10k_hif_tx_sg(htt->ar,
519 htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
520 sg_items, ARRAY_SIZE(sg_items));
506 if (res) 521 if (res)
507 goto err_unmap_msdu; 522 goto err_unmap_msdu;
508 523
509 return 0; 524 return 0;
510 525
511err_unmap_msdu: 526err_unmap_msdu:
512 ath10k_skb_unmap(dev, msdu); 527 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
513err_pull_txfrag: 528err_free_txbuf:
514 skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); 529 dma_pool_free(htt->tx_pool,
515err_free_txdesc: 530 skb_cb->htt.txbuf,
516 dev_kfree_skb_any(txdesc); 531 skb_cb->htt.txbuf_paddr);
517err_free_msdu_id: 532err_free_msdu_id:
518 spin_lock_bh(&htt->tx_lock); 533 spin_lock_bh(&htt->tx_lock);
519 htt->pending_tx[msdu_id] = NULL; 534 htt->pending_tx[msdu_id] = NULL;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f1505a25d810..35fc44e281f5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -205,8 +205,11 @@ enum ath10k_mcast2ucast_mode {
205#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 205#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
206#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 206#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
207 207
208#define SOC_RESET_CONTROL_ADDRESS 0x00000000
208#define SOC_RESET_CONTROL_OFFSET 0x00000000 209#define SOC_RESET_CONTROL_OFFSET 0x00000000
209#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 210#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
211#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
212#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
210#define SOC_CPU_CLOCK_OFFSET 0x00000020 213#define SOC_CPU_CLOCK_OFFSET 0x00000020
211#define SOC_CPU_CLOCK_STANDARD_LSB 0 214#define SOC_CPU_CLOCK_STANDARD_LSB 0
212#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 215#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
@@ -216,6 +219,8 @@ enum ath10k_mcast2ucast_mode {
216#define SOC_LPO_CAL_OFFSET 0x000000e0 219#define SOC_LPO_CAL_OFFSET 0x000000e0
217#define SOC_LPO_CAL_ENABLE_LSB 20 220#define SOC_LPO_CAL_ENABLE_LSB 20
218#define SOC_LPO_CAL_ENABLE_MASK 0x00100000 221#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
222#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
223#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
219 224
220#define SOC_CHIP_ID_ADDRESS 0x000000ec 225#define SOC_CHIP_ID_ADDRESS 0x000000ec
221#define SOC_CHIP_ID_REV_LSB 8 226#define SOC_CHIP_ID_REV_LSB 8
@@ -273,6 +278,7 @@ enum ath10k_mcast2ucast_mode {
273#define PCIE_INTR_CAUSE_ADDRESS 0x000c 278#define PCIE_INTR_CAUSE_ADDRESS 0x000c
274#define PCIE_INTR_CLR_ADDRESS 0x0014 279#define PCIE_INTR_CLR_ADDRESS 0x0014
275#define SCRATCH_3_ADDRESS 0x0030 280#define SCRATCH_3_ADDRESS 0x0030
281#define CPU_INTR_ADDRESS 0x0010
276 282
277/* Firmware indications to the Host via SCRATCH_3 register. */ 283/* Firmware indications to the Host via SCRATCH_3 register. */
278#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) 284#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 776e364eadcd..511a2f81e7af 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
323 323
324 ret = ath10k_wmi_peer_create(ar, vdev_id, addr); 324 ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
325 if (ret) { 325 if (ret) {
326 ath10k_warn("Failed to create wmi peer: %i\n", ret); 326 ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
327 addr, vdev_id, ret);
327 return ret; 328 return ret;
328 } 329 }
329 330
330 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 331 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
331 if (ret) { 332 if (ret) {
332 ath10k_warn("Failed to wait for created wmi peer: %i\n", ret); 333 ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
334 addr, vdev_id, ret);
333 return ret; 335 return ret;
334 } 336 }
335 spin_lock_bh(&ar->data_lock); 337 spin_lock_bh(&ar->data_lock);
@@ -339,6 +341,51 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
339 return 0; 341 return 0;
340} 342}
341 343
344static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
345{
346 struct ath10k *ar = arvif->ar;
347 u32 param;
348 int ret;
349
350 param = ar->wmi.pdev_param->sta_kickout_th;
351 ret = ath10k_wmi_pdev_set_param(ar, param,
352 ATH10K_KICKOUT_THRESHOLD);
353 if (ret) {
354 ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
355 arvif->vdev_id, ret);
356 return ret;
357 }
358
359 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
360 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
361 ATH10K_KEEPALIVE_MIN_IDLE);
362 if (ret) {
363 ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
364 arvif->vdev_id, ret);
365 return ret;
366 }
367
368 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
369 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
370 ATH10K_KEEPALIVE_MAX_IDLE);
371 if (ret) {
372 ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
373 arvif->vdev_id, ret);
374 return ret;
375 }
376
377 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
378 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
379 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
380 if (ret) {
381 ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
382 arvif->vdev_id, ret);
383 return ret;
384 }
385
386 return 0;
387}
388
342static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 389static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
343{ 390{
344 struct ath10k *ar = arvif->ar; 391 struct ath10k *ar = arvif->ar;
@@ -444,8 +491,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
444static int ath10k_vdev_start(struct ath10k_vif *arvif) 491static int ath10k_vdev_start(struct ath10k_vif *arvif)
445{ 492{
446 struct ath10k *ar = arvif->ar; 493 struct ath10k *ar = arvif->ar;
447 struct ieee80211_conf *conf = &ar->hw->conf; 494 struct cfg80211_chan_def *chandef = &ar->chandef;
448 struct ieee80211_channel *channel = conf->chandef.chan;
449 struct wmi_vdev_start_request_arg arg = {}; 495 struct wmi_vdev_start_request_arg arg = {};
450 int ret = 0; 496 int ret = 0;
451 497
@@ -457,16 +503,14 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
457 arg.dtim_period = arvif->dtim_period; 503 arg.dtim_period = arvif->dtim_period;
458 arg.bcn_intval = arvif->beacon_interval; 504 arg.bcn_intval = arvif->beacon_interval;
459 505
460 arg.channel.freq = channel->center_freq; 506 arg.channel.freq = chandef->chan->center_freq;
461 507 arg.channel.band_center_freq1 = chandef->center_freq1;
462 arg.channel.band_center_freq1 = conf->chandef.center_freq1; 508 arg.channel.mode = chan_to_phymode(chandef);
463
464 arg.channel.mode = chan_to_phymode(&conf->chandef);
465 509
466 arg.channel.min_power = 0; 510 arg.channel.min_power = 0;
467 arg.channel.max_power = channel->max_power * 2; 511 arg.channel.max_power = chandef->chan->max_power * 2;
468 arg.channel.max_reg_power = channel->max_reg_power * 2; 512 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
469 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; 513 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
470 514
471 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 515 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
472 arg.ssid = arvif->u.ap.ssid; 516 arg.ssid = arvif->u.ap.ssid;
@@ -475,7 +519,7 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
475 519
476 /* For now allow DFS for AP mode */ 520 /* For now allow DFS for AP mode */
477 arg.channel.chan_radar = 521 arg.channel.chan_radar =
478 !!(channel->flags & IEEE80211_CHAN_RADAR); 522 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
479 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 523 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
480 arg.ssid = arvif->vif->bss_conf.ssid; 524 arg.ssid = arvif->vif->bss_conf.ssid;
481 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 525 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -488,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
488 532
489 ret = ath10k_wmi_vdev_start(ar, &arg); 533 ret = ath10k_wmi_vdev_start(ar, &arg);
490 if (ret) { 534 if (ret) {
491 ath10k_warn("WMI vdev start failed: ret %d\n", ret); 535 ath10k_warn("WMI vdev %i start failed: ret %d\n",
536 arg.vdev_id, ret);
492 return ret; 537 return ret;
493 } 538 }
494 539
495 ret = ath10k_vdev_setup_sync(ar); 540 ret = ath10k_vdev_setup_sync(ar);
496 if (ret) { 541 if (ret) {
497 ath10k_warn("vdev setup failed %d\n", ret); 542 ath10k_warn("vdev %i setup failed %d\n",
543 arg.vdev_id, ret);
498 return ret; 544 return ret;
499 } 545 }
500 546
@@ -512,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
512 558
513 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 559 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
514 if (ret) { 560 if (ret) {
515 ath10k_warn("WMI vdev stop failed: ret %d\n", ret); 561 ath10k_warn("WMI vdev %i stop failed: ret %d\n",
562 arvif->vdev_id, ret);
516 return ret; 563 return ret;
517 } 564 }
518 565
519 ret = ath10k_vdev_setup_sync(ar); 566 ret = ath10k_vdev_setup_sync(ar);
520 if (ret) { 567 if (ret) {
521 ath10k_warn("vdev setup failed %d\n", ret); 568 ath10k_warn("vdev %i setup sync failed %d\n",
569 arvif->vdev_id, ret);
522 return ret; 570 return ret;
523 } 571 }
524 572
@@ -527,7 +575,8 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
527 575
528static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) 576static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
529{ 577{
530 struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; 578 struct cfg80211_chan_def *chandef = &ar->chandef;
579 struct ieee80211_channel *channel = chandef->chan;
531 struct wmi_vdev_start_request_arg arg = {}; 580 struct wmi_vdev_start_request_arg arg = {};
532 int ret = 0; 581 int ret = 0;
533 582
@@ -540,11 +589,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
540 589
541 arg.vdev_id = vdev_id; 590 arg.vdev_id = vdev_id;
542 arg.channel.freq = channel->center_freq; 591 arg.channel.freq = channel->center_freq;
543 arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; 592 arg.channel.band_center_freq1 = chandef->center_freq1;
544 593
545 /* TODO setup this dynamically, what in case we 594 /* TODO setup this dynamically, what in case we
546 don't have any vifs? */ 595 don't have any vifs? */
547 arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); 596 arg.channel.mode = chan_to_phymode(chandef);
548 arg.channel.chan_radar = 597 arg.channel.chan_radar =
549 !!(channel->flags & IEEE80211_CHAN_RADAR); 598 !!(channel->flags & IEEE80211_CHAN_RADAR);
550 599
@@ -555,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
555 604
556 ret = ath10k_wmi_vdev_start(ar, &arg); 605 ret = ath10k_wmi_vdev_start(ar, &arg);
557 if (ret) { 606 if (ret) {
558 ath10k_warn("Monitor vdev start failed: ret %d\n", ret); 607 ath10k_warn("Monitor vdev %i start failed: ret %d\n",
608 vdev_id, ret);
559 return ret; 609 return ret;
560 } 610 }
561 611
562 ret = ath10k_vdev_setup_sync(ar); 612 ret = ath10k_vdev_setup_sync(ar);
563 if (ret) { 613 if (ret) {
564 ath10k_warn("Monitor vdev setup failed %d\n", ret); 614 ath10k_warn("Monitor vdev %i setup failed %d\n",
615 vdev_id, ret);
565 return ret; 616 return ret;
566 } 617 }
567 618
568 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 619 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
569 if (ret) { 620 if (ret) {
570 ath10k_warn("Monitor vdev up failed: %d\n", ret); 621 ath10k_warn("Monitor vdev %i up failed: %d\n",
622 vdev_id, ret);
571 goto vdev_stop; 623 goto vdev_stop;
572 } 624 }
573 625
@@ -579,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
579vdev_stop: 631vdev_stop:
580 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 632 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
581 if (ret) 633 if (ret)
582 ath10k_warn("Monitor vdev stop failed: %d\n", ret); 634 ath10k_warn("Monitor vdev %i stop failed: %d\n",
635 ar->monitor_vdev_id, ret);
583 636
584 return ret; 637 return ret;
585} 638}
@@ -602,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
602 655
603 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 656 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
604 if (ret) 657 if (ret)
605 ath10k_warn("Monitor vdev down failed: %d\n", ret); 658 ath10k_warn("Monitor vdev %i down failed: %d\n",
659 ar->monitor_vdev_id, ret);
606 660
607 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 661 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
608 if (ret) 662 if (ret)
609 ath10k_warn("Monitor vdev stop failed: %d\n", ret); 663 ath10k_warn("Monitor vdev %i stop failed: %d\n",
664 ar->monitor_vdev_id, ret);
610 665
611 ret = ath10k_vdev_setup_sync(ar); 666 ret = ath10k_vdev_setup_sync(ar);
612 if (ret) 667 if (ret)
613 ath10k_warn("Monitor_down sync failed: %d\n", ret); 668 ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
669 ar->monitor_vdev_id, ret);
614 670
615 ar->monitor_enabled = false; 671 ar->monitor_enabled = false;
616 return ret; 672 return ret;
@@ -640,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
640 WMI_VDEV_TYPE_MONITOR, 696 WMI_VDEV_TYPE_MONITOR,
641 0, ar->mac_addr); 697 0, ar->mac_addr);
642 if (ret) { 698 if (ret) {
643 ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret); 699 ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
700 ar->monitor_vdev_id, ret);
644 goto vdev_fail; 701 goto vdev_fail;
645 } 702 }
646 703
@@ -669,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
669 726
670 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 727 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
671 if (ret) { 728 if (ret) {
672 ath10k_warn("WMI vdev monitor delete failed: %d\n", ret); 729 ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
730 ar->monitor_vdev_id, ret);
673 return ret; 731 return ret;
674 } 732 }
675 733
@@ -791,6 +849,22 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
791 849
792 if (!info->enable_beacon) { 850 if (!info->enable_beacon) {
793 ath10k_vdev_stop(arvif); 851 ath10k_vdev_stop(arvif);
852
853 arvif->is_started = false;
854 arvif->is_up = false;
855
856 spin_lock_bh(&arvif->ar->data_lock);
857 if (arvif->beacon) {
858 dma_unmap_single(arvif->ar->dev,
859 ATH10K_SKB_CB(arvif->beacon)->paddr,
860 arvif->beacon->len, DMA_TO_DEVICE);
861 dev_kfree_skb_any(arvif->beacon);
862
863 arvif->beacon = NULL;
864 arvif->beacon_sent = false;
865 }
866 spin_unlock_bh(&arvif->ar->data_lock);
867
794 return; 868 return;
795 } 869 }
796 870
@@ -800,12 +874,21 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
800 if (ret) 874 if (ret)
801 return; 875 return;
802 876
803 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid); 877 arvif->aid = 0;
878 memcpy(arvif->bssid, info->bssid, ETH_ALEN);
879
880 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
881 arvif->bssid);
804 if (ret) { 882 if (ret) {
805 ath10k_warn("Failed to bring up VDEV: %d\n", 883 ath10k_warn("Failed to bring up vdev %d: %i\n",
806 arvif->vdev_id); 884 arvif->vdev_id, ret);
885 ath10k_vdev_stop(arvif);
807 return; 886 return;
808 } 887 }
888
889 arvif->is_started = true;
890 arvif->is_up = true;
891
809 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 892 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
810} 893}
811 894
@@ -824,18 +907,18 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
824 ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", 907 ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
825 self_peer, arvif->vdev_id, ret); 908 self_peer, arvif->vdev_id, ret);
826 909
827 if (is_zero_ether_addr(arvif->u.ibss.bssid)) 910 if (is_zero_ether_addr(arvif->bssid))
828 return; 911 return;
829 912
830 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, 913 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
831 arvif->u.ibss.bssid); 914 arvif->bssid);
832 if (ret) { 915 if (ret) {
833 ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", 916 ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
834 arvif->u.ibss.bssid, arvif->vdev_id, ret); 917 arvif->bssid, arvif->vdev_id, ret);
835 return; 918 return;
836 } 919 }
837 920
838 memset(arvif->u.ibss.bssid, 0, ETH_ALEN); 921 memset(arvif->bssid, 0, ETH_ALEN);
839 922
840 return; 923 return;
841 } 924 }
@@ -878,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
878 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 961 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
879 conf->dynamic_ps_timeout); 962 conf->dynamic_ps_timeout);
880 if (ret) { 963 if (ret) {
881 ath10k_warn("Failed to set inactivity time for VDEV: %d\n", 964 ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
882 arvif->vdev_id); 965 arvif->vdev_id, ret);
883 return ret; 966 return ret;
884 } 967 }
885 } else { 968 } else {
@@ -1017,7 +1100,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
1017 struct wmi_peer_assoc_complete_arg *arg) 1100 struct wmi_peer_assoc_complete_arg *arg)
1018{ 1101{
1019 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1102 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1020 int smps;
1021 int i, n; 1103 int i, n;
1022 1104
1023 lockdep_assert_held(&ar->conf_mutex); 1105 lockdep_assert_held(&ar->conf_mutex);
@@ -1063,17 +1145,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
1063 arg->peer_flags |= WMI_PEER_STBC; 1145 arg->peer_flags |= WMI_PEER_STBC;
1064 } 1146 }
1065 1147
1066 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
1067 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
1068
1069 if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
1070 arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
1071 arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
1072 } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
1073 arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
1074 arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
1075 }
1076
1077 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 1148 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
1078 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 1149 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
1079 else if (ht_cap->mcs.rx_mask[1]) 1150 else if (ht_cap->mcs.rx_mask[1])
@@ -1083,8 +1154,23 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
1083 if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8)) 1154 if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
1084 arg->peer_ht_rates.rates[n++] = i; 1155 arg->peer_ht_rates.rates[n++] = i;
1085 1156
1086 arg->peer_ht_rates.num_rates = n; 1157 /*
1087 arg->peer_num_spatial_streams = max((n+7) / 8, 1); 1158 * This is a workaround for HT-enabled STAs which break the spec
1159 * and have no HT capabilities RX mask (no HT RX MCS map).
1160 *
1161 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
1162 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
1163 *
1164 * Firmware asserts if such situation occurs.
1165 */
1166 if (n == 0) {
1167 arg->peer_ht_rates.num_rates = 8;
1168 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
1169 arg->peer_ht_rates.rates[i] = i;
1170 } else {
1171 arg->peer_ht_rates.num_rates = n;
1172 arg->peer_num_spatial_streams = sta->rx_nss;
1173 }
1088 1174
1089 ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 1175 ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
1090 arg->addr, 1176 arg->addr,
@@ -1092,27 +1178,20 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
1092 arg->peer_num_spatial_streams); 1178 arg->peer_num_spatial_streams);
1093} 1179}
1094 1180
1095static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, 1181static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
1096 struct ath10k_vif *arvif, 1182 struct ath10k_vif *arvif,
1097 struct ieee80211_sta *sta, 1183 struct ieee80211_sta *sta)
1098 struct ieee80211_bss_conf *bss_conf,
1099 struct wmi_peer_assoc_complete_arg *arg)
1100{ 1184{
1101 u32 uapsd = 0; 1185 u32 uapsd = 0;
1102 u32 max_sp = 0; 1186 u32 max_sp = 0;
1187 int ret = 0;
1103 1188
1104 lockdep_assert_held(&ar->conf_mutex); 1189 lockdep_assert_held(&ar->conf_mutex);
1105 1190
1106 if (sta->wme)
1107 arg->peer_flags |= WMI_PEER_QOS;
1108
1109 if (sta->wme && sta->uapsd_queues) { 1191 if (sta->wme && sta->uapsd_queues) {
1110 ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 1192 ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
1111 sta->uapsd_queues, sta->max_sp); 1193 sta->uapsd_queues, sta->max_sp);
1112 1194
1113 arg->peer_flags |= WMI_PEER_APSD;
1114 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
1115
1116 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 1195 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
1117 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 1196 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
1118 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 1197 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
@@ -1130,35 +1209,40 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
1130 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 1209 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
1131 max_sp = sta->max_sp; 1210 max_sp = sta->max_sp;
1132 1211
1133 ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 1212 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
1134 sta->addr, 1213 sta->addr,
1135 WMI_AP_PS_PEER_PARAM_UAPSD, 1214 WMI_AP_PS_PEER_PARAM_UAPSD,
1136 uapsd); 1215 uapsd);
1216 if (ret) {
1217 ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
1218 arvif->vdev_id, ret);
1219 return ret;
1220 }
1137 1221
1138 ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 1222 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
1139 sta->addr, 1223 sta->addr,
1140 WMI_AP_PS_PEER_PARAM_MAX_SP, 1224 WMI_AP_PS_PEER_PARAM_MAX_SP,
1141 max_sp); 1225 max_sp);
1226 if (ret) {
1227 ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
1228 arvif->vdev_id, ret);
1229 return ret;
1230 }
1142 1231
1143 /* TODO setup this based on STA listen interval and 1232 /* TODO setup this based on STA listen interval and
1144 beacon interval. Currently we don't know 1233 beacon interval. Currently we don't know
1145 sta->listen_interval - mac80211 patch required. 1234 sta->listen_interval - mac80211 patch required.
1146 Currently use 10 seconds */ 1235 Currently use 10 seconds */
1147 ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 1236 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
1148 sta->addr, 1237 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
1149 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 1238 if (ret) {
1150 10); 1239 ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
1240 arvif->vdev_id, ret);
1241 return ret;
1242 }
1151 } 1243 }
1152}
1153 1244
1154static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar, 1245 return 0;
1155 struct ath10k_vif *arvif,
1156 struct ieee80211_sta *sta,
1157 struct ieee80211_bss_conf *bss_conf,
1158 struct wmi_peer_assoc_complete_arg *arg)
1159{
1160 if (bss_conf->qos)
1161 arg->peer_flags |= WMI_PEER_QOS;
1162} 1246}
1163 1247
1164static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 1248static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
@@ -1211,10 +1295,17 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
1211{ 1295{
1212 switch (arvif->vdev_type) { 1296 switch (arvif->vdev_type) {
1213 case WMI_VDEV_TYPE_AP: 1297 case WMI_VDEV_TYPE_AP:
1214 ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg); 1298 if (sta->wme)
1299 arg->peer_flags |= WMI_PEER_QOS;
1300
1301 if (sta->wme && sta->uapsd_queues) {
1302 arg->peer_flags |= WMI_PEER_APSD;
1303 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
1304 }
1215 break; 1305 break;
1216 case WMI_VDEV_TYPE_STA: 1306 case WMI_VDEV_TYPE_STA:
1217 ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg); 1307 if (bss_conf->qos)
1308 arg->peer_flags |= WMI_PEER_QOS;
1218 break; 1309 break;
1219 default: 1310 default:
1220 break; 1311 break;
@@ -1293,6 +1384,33 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
1293 return 0; 1384 return 0;
1294} 1385}
1295 1386
1387static const u32 ath10k_smps_map[] = {
1388 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
1389 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
1390 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
1391 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
1392};
1393
1394static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
1395 const u8 *addr,
1396 const struct ieee80211_sta_ht_cap *ht_cap)
1397{
1398 int smps;
1399
1400 if (!ht_cap->ht_supported)
1401 return 0;
1402
1403 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
1404 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
1405
1406 if (smps >= ARRAY_SIZE(ath10k_smps_map))
1407 return -EINVAL;
1408
1409 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
1410 WMI_PEER_SMPS_STATE,
1411 ath10k_smps_map[smps]);
1412}
1413
1296/* can be called only in mac80211 callbacks due to `key_count` usage */ 1414/* can be called only in mac80211 callbacks due to `key_count` usage */
1297static void ath10k_bss_assoc(struct ieee80211_hw *hw, 1415static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1298 struct ieee80211_vif *vif, 1416 struct ieee80211_vif *vif,
@@ -1300,6 +1418,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1300{ 1418{
1301 struct ath10k *ar = hw->priv; 1419 struct ath10k *ar = hw->priv;
1302 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 1420 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1421 struct ieee80211_sta_ht_cap ht_cap;
1303 struct wmi_peer_assoc_complete_arg peer_arg; 1422 struct wmi_peer_assoc_complete_arg peer_arg;
1304 struct ieee80211_sta *ap_sta; 1423 struct ieee80211_sta *ap_sta;
1305 int ret; 1424 int ret;
@@ -1310,17 +1429,21 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1310 1429
1311 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 1430 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
1312 if (!ap_sta) { 1431 if (!ap_sta) {
1313 ath10k_warn("Failed to find station entry for %pM\n", 1432 ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
1314 bss_conf->bssid); 1433 bss_conf->bssid, arvif->vdev_id);
1315 rcu_read_unlock(); 1434 rcu_read_unlock();
1316 return; 1435 return;
1317 } 1436 }
1318 1437
1438 /* ap_sta must be accessed only within rcu section which must be left
1439 * before calling ath10k_setup_peer_smps() which might sleep. */
1440 ht_cap = ap_sta->ht_cap;
1441
1319 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, 1442 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
1320 bss_conf, &peer_arg); 1443 bss_conf, &peer_arg);
1321 if (ret) { 1444 if (ret) {
1322 ath10k_warn("Peer assoc prepare failed for %pM\n: %d", 1445 ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
1323 bss_conf->bssid, ret); 1446 bss_conf->bssid, arvif->vdev_id, ret);
1324 rcu_read_unlock(); 1447 rcu_read_unlock();
1325 return; 1448 return;
1326 } 1449 }
@@ -1329,8 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1329 1452
1330 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 1453 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1331 if (ret) { 1454 if (ret) {
1332 ath10k_warn("Peer assoc failed for %pM\n: %d", 1455 ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
1333 bss_conf->bssid, ret); 1456 bss_conf->bssid, arvif->vdev_id, ret);
1457 return;
1458 }
1459
1460 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
1461 if (ret) {
1462 ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
1463 arvif->vdev_id, ret);
1334 return; 1464 return;
1335 } 1465 }
1336 1466
@@ -1338,11 +1468,17 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1338 "mac vdev %d up (associated) bssid %pM aid %d\n", 1468 "mac vdev %d up (associated) bssid %pM aid %d\n",
1339 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 1469 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
1340 1470
1341 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, 1471 arvif->aid = bss_conf->aid;
1342 bss_conf->bssid); 1472 memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN);
1343 if (ret) 1473
1474 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
1475 if (ret) {
1344 ath10k_warn("VDEV: %d up failed: ret %d\n", 1476 ath10k_warn("VDEV: %d up failed: ret %d\n",
1345 arvif->vdev_id, ret); 1477 arvif->vdev_id, ret);
1478 return;
1479 }
1480
1481 arvif->is_up = true;
1346} 1482}
1347 1483
1348/* 1484/*
@@ -1382,6 +1518,9 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1382 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1518 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1383 1519
1384 arvif->def_wep_key_idx = 0; 1520 arvif->def_wep_key_idx = 0;
1521
1522 arvif->is_started = false;
1523 arvif->is_up = false;
1385} 1524}
1386 1525
1387static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, 1526static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
@@ -1394,21 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1394 1533
1395 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); 1534 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
1396 if (ret) { 1535 if (ret) {
1397 ath10k_warn("WMI peer assoc prepare failed for %pM\n", 1536 ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
1398 sta->addr); 1537 sta->addr, arvif->vdev_id, ret);
1399 return ret; 1538 return ret;
1400 } 1539 }
1401 1540
1402 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 1541 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1403 if (ret) { 1542 if (ret) {
1404 ath10k_warn("Peer assoc failed for STA %pM\n: %d", 1543 ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
1405 sta->addr, ret); 1544 sta->addr, arvif->vdev_id, ret);
1545 return ret;
1546 }
1547
1548 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
1549 if (ret) {
1550 ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
1406 return ret; 1551 return ret;
1407 } 1552 }
1408 1553
1409 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 1554 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
1410 if (ret) { 1555 if (ret) {
1411 ath10k_warn("could not install peer wep keys (%d)\n", ret); 1556 ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
1557 arvif->vdev_id, ret);
1558 return ret;
1559 }
1560
1561 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
1562 if (ret) {
1563 ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
1564 sta->addr, arvif->vdev_id, ret);
1412 return ret; 1565 return ret;
1413 } 1566 }
1414 1567
@@ -1424,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
1424 1577
1425 ret = ath10k_clear_peer_keys(arvif, sta->addr); 1578 ret = ath10k_clear_peer_keys(arvif, sta->addr);
1426 if (ret) { 1579 if (ret) {
1427 ath10k_warn("could not clear all peer wep keys (%d)\n", ret); 1580 ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
1581 arvif->vdev_id, ret);
1428 return ret; 1582 return ret;
1429 } 1583 }
1430 1584
@@ -1547,9 +1701,9 @@ static void ath10k_regd_update(struct ath10k *ar)
1547 /* Target allows setting up per-band regdomain but ath_common provides 1701 /* Target allows setting up per-band regdomain but ath_common provides
1548 * a combined one only */ 1702 * a combined one only */
1549 ret = ath10k_wmi_pdev_set_regdomain(ar, 1703 ret = ath10k_wmi_pdev_set_regdomain(ar,
1550 regpair->regDmnEnum, 1704 regpair->reg_domain,
1551 regpair->regDmnEnum, /* 2ghz */ 1705 regpair->reg_domain, /* 2ghz */
1552 regpair->regDmnEnum, /* 5ghz */ 1706 regpair->reg_domain, /* 5ghz */
1553 regpair->reg_2ghz_ctl, 1707 regpair->reg_2ghz_ctl,
1554 regpair->reg_5ghz_ctl); 1708 regpair->reg_5ghz_ctl);
1555 if (ret) 1709 if (ret)
@@ -2100,11 +2254,29 @@ static int ath10k_start(struct ieee80211_hw *hw)
2100 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", 2254 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
2101 ret); 2255 ret);
2102 2256
2257 /*
2258 * By default FW set ARP frames ac to voice (6). In that case ARP
2259 * exchange is not working properly for UAPSD enabled AP. ARP requests
2260 * which arrives with access category 0 are processed by network stack
2261 * and send back with access category 0, but FW changes access category
2262 * to 6. Set ARP frames access category to best effort (0) solves
2263 * this problem.
2264 */
2265
2266 ret = ath10k_wmi_pdev_set_param(ar,
2267 ar->wmi.pdev_param->arp_ac_override, 0);
2268 if (ret) {
2269 ath10k_warn("could not set arp ac override parameter: %d\n",
2270 ret);
2271 goto exit;
2272 }
2273
2103 ath10k_regd_update(ar); 2274 ath10k_regd_update(ar);
2275 ret = 0;
2104 2276
2105exit: 2277exit:
2106 mutex_unlock(&ar->conf_mutex); 2278 mutex_unlock(&ar->conf_mutex);
2107 return 0; 2279 return ret;
2108} 2280}
2109 2281
2110static void ath10k_stop(struct ieee80211_hw *hw) 2282static void ath10k_stop(struct ieee80211_hw *hw)
@@ -2145,6 +2317,98 @@ static int ath10k_config_ps(struct ath10k *ar)
2145 return ret; 2317 return ret;
2146} 2318}
2147 2319
2320static const char *chandef_get_width(enum nl80211_chan_width width)
2321{
2322 switch (width) {
2323 case NL80211_CHAN_WIDTH_20_NOHT:
2324 return "20 (noht)";
2325 case NL80211_CHAN_WIDTH_20:
2326 return "20";
2327 case NL80211_CHAN_WIDTH_40:
2328 return "40";
2329 case NL80211_CHAN_WIDTH_80:
2330 return "80";
2331 case NL80211_CHAN_WIDTH_80P80:
2332 return "80+80";
2333 case NL80211_CHAN_WIDTH_160:
2334 return "160";
2335 case NL80211_CHAN_WIDTH_5:
2336 return "5";
2337 case NL80211_CHAN_WIDTH_10:
2338 return "10";
2339 }
2340 return "?";
2341}
2342
2343static void ath10k_config_chan(struct ath10k *ar)
2344{
2345 struct ath10k_vif *arvif;
2346 bool monitor_was_enabled;
2347 int ret;
2348
2349 lockdep_assert_held(&ar->conf_mutex);
2350
2351 ath10k_dbg(ATH10K_DBG_MAC,
2352 "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
2353 ar->chandef.chan->center_freq,
2354 ar->chandef.center_freq1,
2355 ar->chandef.center_freq2,
2356 chandef_get_width(ar->chandef.width));
2357
2358 /* First stop monitor interface. Some FW versions crash if there's a
2359 * lone monitor interface. */
2360 monitor_was_enabled = ar->monitor_enabled;
2361
2362 if (ar->monitor_enabled)
2363 ath10k_monitor_stop(ar);
2364
2365 list_for_each_entry(arvif, &ar->arvifs, list) {
2366 if (!arvif->is_started)
2367 continue;
2368
2369 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2370 continue;
2371
2372 ret = ath10k_vdev_stop(arvif);
2373 if (ret) {
2374 ath10k_warn("could not stop vdev %d (%d)\n",
2375 arvif->vdev_id, ret);
2376 continue;
2377 }
2378 }
2379
2380 /* all vdevs are now stopped - now attempt to restart them */
2381
2382 list_for_each_entry(arvif, &ar->arvifs, list) {
2383 if (!arvif->is_started)
2384 continue;
2385
2386 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2387 continue;
2388
2389 ret = ath10k_vdev_start(arvif);
2390 if (ret) {
2391 ath10k_warn("could not start vdev %d (%d)\n",
2392 arvif->vdev_id, ret);
2393 continue;
2394 }
2395
2396 if (!arvif->is_up)
2397 continue;
2398
2399 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
2400 arvif->bssid);
2401 if (ret) {
2402 ath10k_warn("could not bring vdev up %d (%d)\n",
2403 arvif->vdev_id, ret);
2404 continue;
2405 }
2406 }
2407
2408 if (monitor_was_enabled)
2409 ath10k_monitor_start(ar, ar->monitor_vdev_id);
2410}
2411
2148static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 2412static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2149{ 2413{
2150 struct ath10k *ar = hw->priv; 2414 struct ath10k *ar = hw->priv;
@@ -2165,6 +2429,11 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2165 spin_unlock_bh(&ar->data_lock); 2429 spin_unlock_bh(&ar->data_lock);
2166 2430
2167 ath10k_config_radar_detection(ar); 2431 ath10k_config_radar_detection(ar);
2432
2433 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
2434 ar->chandef = conf->chandef;
2435 ath10k_config_chan(ar);
2436 }
2168 } 2437 }
2169 2438
2170 if (changed & IEEE80211_CONF_CHANGE_POWER) { 2439 if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -2214,7 +2483,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2214 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2483 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2215 enum wmi_sta_powersave_param param; 2484 enum wmi_sta_powersave_param param;
2216 int ret = 0; 2485 int ret = 0;
2217 u32 value, param_id; 2486 u32 value;
2218 int bit; 2487 int bit;
2219 u32 vdev_param; 2488 u32 vdev_param;
2220 2489
@@ -2276,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2276 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 2545 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
2277 arvif->vdev_subtype, vif->addr); 2546 arvif->vdev_subtype, vif->addr);
2278 if (ret) { 2547 if (ret) {
2279 ath10k_warn("WMI vdev create failed: ret %d\n", ret); 2548 ath10k_warn("WMI vdev %i create failed: ret %d\n",
2549 arvif->vdev_id, ret);
2280 goto err; 2550 goto err;
2281 } 2551 }
2282 2552
@@ -2287,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2287 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, 2557 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
2288 arvif->def_wep_key_idx); 2558 arvif->def_wep_key_idx);
2289 if (ret) { 2559 if (ret) {
2290 ath10k_warn("Failed to set default keyid: %d\n", ret); 2560 ath10k_warn("Failed to set vdev %i default keyid: %d\n",
2561 arvif->vdev_id, ret);
2291 goto err_vdev_delete; 2562 goto err_vdev_delete;
2292 } 2563 }
2293 2564
@@ -2296,23 +2567,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2296 ATH10K_HW_TXRX_NATIVE_WIFI); 2567 ATH10K_HW_TXRX_NATIVE_WIFI);
2297 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 2568 /* 10.X firmware does not support this VDEV parameter. Do not warn */
2298 if (ret && ret != -EOPNOTSUPP) { 2569 if (ret && ret != -EOPNOTSUPP) {
2299 ath10k_warn("Failed to set TX encap: %d\n", ret); 2570 ath10k_warn("Failed to set vdev %i TX encap: %d\n",
2571 arvif->vdev_id, ret);
2300 goto err_vdev_delete; 2572 goto err_vdev_delete;
2301 } 2573 }
2302 2574
2303 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2575 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2304 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); 2576 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
2305 if (ret) { 2577 if (ret) {
2306 ath10k_warn("Failed to create peer for AP: %d\n", ret); 2578 ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
2579 arvif->vdev_id, ret);
2307 goto err_vdev_delete; 2580 goto err_vdev_delete;
2308 } 2581 }
2309 2582
2310 param_id = ar->wmi.pdev_param->sta_kickout_th; 2583 ret = ath10k_mac_set_kickout(arvif);
2311 2584 if (ret) {
2312 /* Disable STA KICKOUT functionality in FW */ 2585 ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
2313 ret = ath10k_wmi_pdev_set_param(ar, param_id, 0); 2586 arvif->vdev_id, ret);
2314 if (ret) 2587 goto err_peer_delete;
2315 ath10k_warn("Failed to disable STA KICKOUT\n"); 2588 }
2316 } 2589 }
2317 2590
2318 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 2591 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2321,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2321 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2594 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2322 param, value); 2595 param, value);
2323 if (ret) { 2596 if (ret) {
2324 ath10k_warn("Failed to set RX wake policy: %d\n", ret); 2597 ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
2598 arvif->vdev_id, ret);
2325 goto err_peer_delete; 2599 goto err_peer_delete;
2326 } 2600 }
2327 2601
@@ -2330,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2330 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2604 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2331 param, value); 2605 param, value);
2332 if (ret) { 2606 if (ret) {
2333 ath10k_warn("Failed to set TX wake thresh: %d\n", ret); 2607 ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
2608 arvif->vdev_id, ret);
2334 goto err_peer_delete; 2609 goto err_peer_delete;
2335 } 2610 }
2336 2611
@@ -2339,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2339 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2614 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2340 param, value); 2615 param, value);
2341 if (ret) { 2616 if (ret) {
2342 ath10k_warn("Failed to set PSPOLL count: %d\n", ret); 2617 ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
2618 arvif->vdev_id, ret);
2343 goto err_peer_delete; 2619 goto err_peer_delete;
2344 } 2620 }
2345 } 2621 }
@@ -2403,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2403 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2679 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2404 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); 2680 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
2405 if (ret) 2681 if (ret)
2406 ath10k_warn("Failed to remove peer for AP: %d\n", ret); 2682 ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
2683 arvif->vdev_id, ret);
2407 2684
2408 kfree(arvif->u.ap.noa_data); 2685 kfree(arvif->u.ap.noa_data);
2409 } 2686 }
2410 2687
2411 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n", 2688 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
2412 arvif->vdev_id); 2689 arvif->vdev_id);
2413 2690
2414 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 2691 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
2415 if (ret) 2692 if (ret)
2416 ath10k_warn("WMI vdev delete failed: %d\n", ret); 2693 ath10k_warn("WMI vdev %i delete failed: %d\n",
2694 arvif->vdev_id, ret);
2417 2695
2418 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) 2696 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2419 ar->monitor_present = false; 2697 ar->monitor_present = false;
@@ -2502,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2502 arvif->vdev_id, arvif->beacon_interval); 2780 arvif->vdev_id, arvif->beacon_interval);
2503 2781
2504 if (ret) 2782 if (ret)
2505 ath10k_warn("Failed to set beacon interval for VDEV: %d\n", 2783 ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
2506 arvif->vdev_id); 2784 arvif->vdev_id, ret);
2507 } 2785 }
2508 2786
2509 if (changed & BSS_CHANGED_BEACON) { 2787 if (changed & BSS_CHANGED_BEACON) {
@@ -2515,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2515 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 2793 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
2516 WMI_BEACON_STAGGERED_MODE); 2794 WMI_BEACON_STAGGERED_MODE);
2517 if (ret) 2795 if (ret)
2518 ath10k_warn("Failed to set beacon mode for VDEV: %d\n", 2796 ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
2519 arvif->vdev_id); 2797 arvif->vdev_id, ret);
2520 } 2798 }
2521 2799
2522 if (changed & BSS_CHANGED_BEACON_INFO) { 2800 if (changed & BSS_CHANGED_BEACON_INFO) {
@@ -2530,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2530 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2808 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2531 arvif->dtim_period); 2809 arvif->dtim_period);
2532 if (ret) 2810 if (ret)
2533 ath10k_warn("Failed to set dtim period for VDEV: %d\n", 2811 ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
2534 arvif->vdev_id); 2812 arvif->vdev_id, ret);
2535 } 2813 }
2536 2814
2537 if (changed & BSS_CHANGED_SSID && 2815 if (changed & BSS_CHANGED_SSID &&
@@ -2551,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2551 ret = ath10k_peer_create(ar, arvif->vdev_id, 2829 ret = ath10k_peer_create(ar, arvif->vdev_id,
2552 info->bssid); 2830 info->bssid);
2553 if (ret) 2831 if (ret)
2554 ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n", 2832 ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
2555 info->bssid, arvif->vdev_id, ret); 2833 info->bssid, arvif->vdev_id, ret);
2556 2834
2557 if (vif->type == NL80211_IFTYPE_STATION) { 2835 if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2559,15 +2837,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2559 * this is never erased as we it for crypto key 2837 * this is never erased as we it for crypto key
2560 * clearing; this is FW requirement 2838 * clearing; this is FW requirement
2561 */ 2839 */
2562 memcpy(arvif->u.sta.bssid, info->bssid, 2840 memcpy(arvif->bssid, info->bssid, ETH_ALEN);
2563 ETH_ALEN);
2564 2841
2565 ath10k_dbg(ATH10K_DBG_MAC, 2842 ath10k_dbg(ATH10K_DBG_MAC,
2566 "mac vdev %d start %pM\n", 2843 "mac vdev %d start %pM\n",
2567 arvif->vdev_id, info->bssid); 2844 arvif->vdev_id, info->bssid);
2568 2845
2569 /* FIXME: check return value */
2570 ret = ath10k_vdev_start(arvif); 2846 ret = ath10k_vdev_start(arvif);
2847 if (ret) {
2848 ath10k_warn("failed to start vdev %i: %d\n",
2849 arvif->vdev_id, ret);
2850 goto exit;
2851 }
2852
2853 arvif->is_started = true;
2571 } 2854 }
2572 2855
2573 /* 2856 /*
@@ -2576,7 +2859,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2576 * IBSS in order to remove BSSID peer. 2859 * IBSS in order to remove BSSID peer.
2577 */ 2860 */
2578 if (vif->type == NL80211_IFTYPE_ADHOC) 2861 if (vif->type == NL80211_IFTYPE_ADHOC)
2579 memcpy(arvif->u.ibss.bssid, info->bssid, 2862 memcpy(arvif->bssid, info->bssid,
2580 ETH_ALEN); 2863 ETH_ALEN);
2581 } 2864 }
2582 } 2865 }
@@ -2598,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2598 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2881 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2599 cts_prot); 2882 cts_prot);
2600 if (ret) 2883 if (ret)
2601 ath10k_warn("Failed to set CTS prot for VDEV: %d\n", 2884 ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
2602 arvif->vdev_id); 2885 arvif->vdev_id, ret);
2603 } 2886 }
2604 2887
2605 if (changed & BSS_CHANGED_ERP_SLOT) { 2888 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2617,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2617 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2900 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2618 slottime); 2901 slottime);
2619 if (ret) 2902 if (ret)
2620 ath10k_warn("Failed to set erp slot for VDEV: %d\n", 2903 ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
2621 arvif->vdev_id); 2904 arvif->vdev_id, ret);
2622 } 2905 }
2623 2906
2624 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2907 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2636,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2636 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2919 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2637 preamble); 2920 preamble);
2638 if (ret) 2921 if (ret)
2639 ath10k_warn("Failed to set preamble for VDEV: %d\n", 2922 ath10k_warn("Failed to set preamble for vdev %d: %i\n",
2640 arvif->vdev_id); 2923 arvif->vdev_id, ret);
2641 } 2924 }
2642 2925
2643 if (changed & BSS_CHANGED_ASSOC) { 2926 if (changed & BSS_CHANGED_ASSOC) {
@@ -2645,6 +2928,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2645 ath10k_bss_assoc(hw, vif, info); 2928 ath10k_bss_assoc(hw, vif, info);
2646 } 2929 }
2647 2930
2931exit:
2648 mutex_unlock(&ar->conf_mutex); 2932 mutex_unlock(&ar->conf_mutex);
2649} 2933}
2650 2934
@@ -2767,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
2767 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3051 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2768 key->keyidx); 3052 key->keyidx);
2769 if (ret) 3053 if (ret)
2770 ath10k_warn("failed to set group key as default key: %d\n", 3054 ath10k_warn("failed to set vdev %i group key as default key: %d\n",
2771 ret); 3055 arvif->vdev_id, ret);
2772} 3056}
2773 3057
2774static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3058static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2828,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2828 3112
2829 ret = ath10k_install_key(arvif, key, cmd, peer_addr); 3113 ret = ath10k_install_key(arvif, key, cmd, peer_addr);
2830 if (ret) { 3114 if (ret) {
2831 ath10k_warn("ath10k_install_key failed (%d)\n", ret); 3115 ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
3116 arvif->vdev_id, peer_addr, ret);
2832 goto exit; 3117 goto exit;
2833 } 3118 }
2834 3119
@@ -2850,6 +3135,69 @@ exit:
2850 return ret; 3135 return ret;
2851} 3136}
2852 3137
3138static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3139{
3140 struct ath10k *ar;
3141 struct ath10k_vif *arvif;
3142 struct ath10k_sta *arsta;
3143 struct ieee80211_sta *sta;
3144 u32 changed, bw, nss, smps;
3145 int err;
3146
3147 arsta = container_of(wk, struct ath10k_sta, update_wk);
3148 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
3149 arvif = arsta->arvif;
3150 ar = arvif->ar;
3151
3152 spin_lock_bh(&ar->data_lock);
3153
3154 changed = arsta->changed;
3155 arsta->changed = 0;
3156
3157 bw = arsta->bw;
3158 nss = arsta->nss;
3159 smps = arsta->smps;
3160
3161 spin_unlock_bh(&ar->data_lock);
3162
3163 mutex_lock(&ar->conf_mutex);
3164
3165 if (changed & IEEE80211_RC_BW_CHANGED) {
3166 ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
3167 sta->addr, bw);
3168
3169 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
3170 WMI_PEER_CHAN_WIDTH, bw);
3171 if (err)
3172 ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
3173 sta->addr, bw, err);
3174 }
3175
3176 if (changed & IEEE80211_RC_NSS_CHANGED) {
3177 ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
3178 sta->addr, nss);
3179
3180 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
3181 WMI_PEER_NSS, nss);
3182 if (err)
3183 ath10k_warn("failed to update STA %pM nss %d: %d\n",
3184 sta->addr, nss, err);
3185 }
3186
3187 if (changed & IEEE80211_RC_SMPS_CHANGED) {
3188 ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
3189 sta->addr, smps);
3190
3191 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
3192 WMI_PEER_SMPS_STATE, smps);
3193 if (err)
3194 ath10k_warn("failed to update STA %pM smps %d: %d\n",
3195 sta->addr, smps, err);
3196 }
3197
3198 mutex_unlock(&ar->conf_mutex);
3199}
3200
2853static int ath10k_sta_state(struct ieee80211_hw *hw, 3201static int ath10k_sta_state(struct ieee80211_hw *hw,
2854 struct ieee80211_vif *vif, 3202 struct ieee80211_vif *vif,
2855 struct ieee80211_sta *sta, 3203 struct ieee80211_sta *sta,
@@ -2858,9 +3206,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2858{ 3206{
2859 struct ath10k *ar = hw->priv; 3207 struct ath10k *ar = hw->priv;
2860 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 3208 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3209 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2861 int max_num_peers; 3210 int max_num_peers;
2862 int ret = 0; 3211 int ret = 0;
2863 3212
3213 if (old_state == IEEE80211_STA_NOTEXIST &&
3214 new_state == IEEE80211_STA_NONE) {
3215 memset(arsta, 0, sizeof(*arsta));
3216 arsta->arvif = arvif;
3217 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
3218 }
3219
3220 /* cancel must be done outside the mutex to avoid deadlock */
3221 if ((old_state == IEEE80211_STA_NONE &&
3222 new_state == IEEE80211_STA_NOTEXIST))
3223 cancel_work_sync(&arsta->update_wk);
3224
2864 mutex_lock(&ar->conf_mutex); 3225 mutex_lock(&ar->conf_mutex);
2865 3226
2866 if (old_state == IEEE80211_STA_NOTEXIST && 3227 if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -2899,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2899 arvif->vdev_id, sta->addr); 3260 arvif->vdev_id, sta->addr);
2900 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 3261 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
2901 if (ret) 3262 if (ret)
2902 ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", 3263 ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
2903 sta->addr, arvif->vdev_id); 3264 sta->addr, arvif->vdev_id, ret);
2904 3265
2905 if (vif->type == NL80211_IFTYPE_STATION) 3266 if (vif->type == NL80211_IFTYPE_STATION)
2906 ath10k_bss_disassoc(hw, vif); 3267 ath10k_bss_disassoc(hw, vif);
@@ -2916,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2916 3277
2917 ret = ath10k_station_assoc(ar, arvif, sta); 3278 ret = ath10k_station_assoc(ar, arvif, sta);
2918 if (ret) 3279 if (ret)
2919 ath10k_warn("Failed to associate station: %pM\n", 3280 ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
2920 sta->addr); 3281 sta->addr, arvif->vdev_id, ret);
2921 } else if (old_state == IEEE80211_STA_ASSOC && 3282 } else if (old_state == IEEE80211_STA_ASSOC &&
2922 new_state == IEEE80211_STA_AUTH && 3283 new_state == IEEE80211_STA_AUTH &&
2923 (vif->type == NL80211_IFTYPE_AP || 3284 (vif->type == NL80211_IFTYPE_AP ||
@@ -2930,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
2930 3291
2931 ret = ath10k_station_disassoc(ar, arvif, sta); 3292 ret = ath10k_station_disassoc(ar, arvif, sta);
2932 if (ret) 3293 if (ret)
2933 ath10k_warn("Failed to disassociate station: %pM\n", 3294 ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
2934 sta->addr); 3295 sta->addr, arvif->vdev_id, ret);
2935 } 3296 }
2936exit: 3297exit:
2937 mutex_unlock(&ar->conf_mutex); 3298 mutex_unlock(&ar->conf_mutex);
@@ -3212,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
3212 }), ATH10K_FLUSH_TIMEOUT_HZ); 3573 }), ATH10K_FLUSH_TIMEOUT_HZ);
3213 3574
3214 if (ret <= 0 || skip) 3575 if (ret <= 0 || skip)
3215 ath10k_warn("tx not flushed\n"); 3576 ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
3577 skip, ar->state, ret);
3216 3578
3217skip: 3579skip:
3218 mutex_unlock(&ar->conf_mutex); 3580 mutex_unlock(&ar->conf_mutex);
@@ -3234,23 +3596,14 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
3234 struct ath10k *ar = hw->priv; 3596 struct ath10k *ar = hw->priv;
3235 int ret; 3597 int ret;
3236 3598
3237 ar->is_target_paused = false; 3599 mutex_lock(&ar->conf_mutex);
3238 3600
3239 ret = ath10k_wmi_pdev_suspend_target(ar); 3601 ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
3240 if (ret) { 3602 if (ret) {
3241 ath10k_warn("could not suspend target (%d)\n", ret); 3603 if (ret == -ETIMEDOUT)
3242 return 1; 3604 goto resume;
3243 } 3605 ret = 1;
3244 3606 goto exit;
3245 ret = wait_event_interruptible_timeout(ar->event_queue,
3246 ar->is_target_paused == true,
3247 1 * HZ);
3248 if (ret < 0) {
3249 ath10k_warn("suspend interrupted (%d)\n", ret);
3250 goto resume;
3251 } else if (ret == 0) {
3252 ath10k_warn("suspend timed out - target pause event never came\n");
3253 goto resume;
3254 } 3607 }
3255 3608
3256 ret = ath10k_hif_suspend(ar); 3609 ret = ath10k_hif_suspend(ar);
@@ -3259,12 +3612,17 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
3259 goto resume; 3612 goto resume;
3260 } 3613 }
3261 3614
3262 return 0; 3615 ret = 0;
3616 goto exit;
3263resume: 3617resume:
3264 ret = ath10k_wmi_pdev_resume_target(ar); 3618 ret = ath10k_wmi_pdev_resume_target(ar);
3265 if (ret) 3619 if (ret)
3266 ath10k_warn("could not resume target (%d)\n", ret); 3620 ath10k_warn("could not resume target (%d)\n", ret);
3267 return 1; 3621
3622 ret = 1;
3623exit:
3624 mutex_unlock(&ar->conf_mutex);
3625 return ret;
3268} 3626}
3269 3627
3270static int ath10k_resume(struct ieee80211_hw *hw) 3628static int ath10k_resume(struct ieee80211_hw *hw)
@@ -3272,19 +3630,26 @@ static int ath10k_resume(struct ieee80211_hw *hw)
3272 struct ath10k *ar = hw->priv; 3630 struct ath10k *ar = hw->priv;
3273 int ret; 3631 int ret;
3274 3632
3633 mutex_lock(&ar->conf_mutex);
3634
3275 ret = ath10k_hif_resume(ar); 3635 ret = ath10k_hif_resume(ar);
3276 if (ret) { 3636 if (ret) {
3277 ath10k_warn("could not resume hif (%d)\n", ret); 3637 ath10k_warn("could not resume hif (%d)\n", ret);
3278 return 1; 3638 ret = 1;
3639 goto exit;
3279 } 3640 }
3280 3641
3281 ret = ath10k_wmi_pdev_resume_target(ar); 3642 ret = ath10k_wmi_pdev_resume_target(ar);
3282 if (ret) { 3643 if (ret) {
3283 ath10k_warn("could not resume target (%d)\n", ret); 3644 ath10k_warn("could not resume target (%d)\n", ret);
3284 return 1; 3645 ret = 1;
3646 goto exit;
3285 } 3647 }
3286 3648
3287 return 0; 3649 ret = 0;
3650exit:
3651 mutex_unlock(&ar->conf_mutex);
3652 return ret;
3288} 3653}
3289#endif 3654#endif
3290 3655
@@ -3575,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
3575 3940
3576static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, 3941static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3577 u8 fixed_rate, 3942 u8 fixed_rate,
3578 u8 fixed_nss) 3943 u8 fixed_nss,
3944 u8 force_sgi)
3579{ 3945{
3580 struct ath10k *ar = arvif->ar; 3946 struct ath10k *ar = arvif->ar;
3581 u32 vdev_param; 3947 u32 vdev_param;
@@ -3584,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3584 mutex_lock(&ar->conf_mutex); 3950 mutex_lock(&ar->conf_mutex);
3585 3951
3586 if (arvif->fixed_rate == fixed_rate && 3952 if (arvif->fixed_rate == fixed_rate &&
3587 arvif->fixed_nss == fixed_nss) 3953 arvif->fixed_nss == fixed_nss &&
3954 arvif->force_sgi == force_sgi)
3588 goto exit; 3955 goto exit;
3589 3956
3590 if (fixed_rate == WMI_FIXED_RATE_NONE) 3957 if (fixed_rate == WMI_FIXED_RATE_NONE)
3591 ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); 3958 ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
3592 3959
3960 if (force_sgi)
3961 ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
3962
3593 vdev_param = ar->wmi.vdev_param->fixed_rate; 3963 vdev_param = ar->wmi.vdev_param->fixed_rate;
3594 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 3964 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
3595 vdev_param, fixed_rate); 3965 vdev_param, fixed_rate);
@@ -3615,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3615 3985
3616 arvif->fixed_nss = fixed_nss; 3986 arvif->fixed_nss = fixed_nss;
3617 3987
3988 vdev_param = ar->wmi.vdev_param->sgi;
3989 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
3990 force_sgi);
3991
3992 if (ret) {
3993 ath10k_warn("Could not set sgi param %d: %d\n",
3994 force_sgi, ret);
3995 ret = -EINVAL;
3996 goto exit;
3997 }
3998
3999 arvif->force_sgi = force_sgi;
4000
3618exit: 4001exit:
3619 mutex_unlock(&ar->conf_mutex); 4002 mutex_unlock(&ar->conf_mutex);
3620 return ret; 4003 return ret;
@@ -3629,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
3629 enum ieee80211_band band = ar->hw->conf.chandef.chan->band; 4012 enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
3630 u8 fixed_rate = WMI_FIXED_RATE_NONE; 4013 u8 fixed_rate = WMI_FIXED_RATE_NONE;
3631 u8 fixed_nss = ar->num_rf_chains; 4014 u8 fixed_nss = ar->num_rf_chains;
4015 u8 force_sgi;
4016
4017 force_sgi = mask->control[band].gi;
4018 if (force_sgi == NL80211_TXRATE_FORCE_LGI)
4019 return -EINVAL;
3632 4020
3633 if (!ath10k_default_bitrate_mask(ar, band, mask)) { 4021 if (!ath10k_default_bitrate_mask(ar, band, mask)) {
3634 if (!ath10k_get_fixed_rate_nss(mask, band, 4022 if (!ath10k_get_fixed_rate_nss(mask, band,
@@ -3637,7 +4025,113 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
3637 return -EINVAL; 4025 return -EINVAL;
3638 } 4026 }
3639 4027
3640 return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss); 4028 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
4029 ath10k_warn("Could not force SGI usage for default rate settings\n");
4030 return -EINVAL;
4031 }
4032
4033 return ath10k_set_fixed_rate_param(arvif, fixed_rate,
4034 fixed_nss, force_sgi);
4035}
4036
4037static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
4038 struct ieee80211_vif *vif,
4039 struct cfg80211_chan_def *chandef)
4040{
4041 /* there's no need to do anything here. vif->csa_active is enough */
4042 return;
4043}
4044
4045static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4046 struct ieee80211_vif *vif,
4047 struct ieee80211_sta *sta,
4048 u32 changed)
4049{
4050 struct ath10k *ar = hw->priv;
4051 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
4052 u32 bw, smps;
4053
4054 spin_lock_bh(&ar->data_lock);
4055
4056 ath10k_dbg(ATH10K_DBG_MAC,
4057 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
4058 sta->addr, changed, sta->bandwidth, sta->rx_nss,
4059 sta->smps_mode);
4060
4061 if (changed & IEEE80211_RC_BW_CHANGED) {
4062 bw = WMI_PEER_CHWIDTH_20MHZ;
4063
4064 switch (sta->bandwidth) {
4065 case IEEE80211_STA_RX_BW_20:
4066 bw = WMI_PEER_CHWIDTH_20MHZ;
4067 break;
4068 case IEEE80211_STA_RX_BW_40:
4069 bw = WMI_PEER_CHWIDTH_40MHZ;
4070 break;
4071 case IEEE80211_STA_RX_BW_80:
4072 bw = WMI_PEER_CHWIDTH_80MHZ;
4073 break;
4074 case IEEE80211_STA_RX_BW_160:
4075 ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
4076 sta->addr, sta->bandwidth);
4077 bw = WMI_PEER_CHWIDTH_20MHZ;
4078 break;
4079 }
4080
4081 arsta->bw = bw;
4082 }
4083
4084 if (changed & IEEE80211_RC_NSS_CHANGED)
4085 arsta->nss = sta->rx_nss;
4086
4087 if (changed & IEEE80211_RC_SMPS_CHANGED) {
4088 smps = WMI_PEER_SMPS_PS_NONE;
4089
4090 switch (sta->smps_mode) {
4091 case IEEE80211_SMPS_AUTOMATIC:
4092 case IEEE80211_SMPS_OFF:
4093 smps = WMI_PEER_SMPS_PS_NONE;
4094 break;
4095 case IEEE80211_SMPS_STATIC:
4096 smps = WMI_PEER_SMPS_STATIC;
4097 break;
4098 case IEEE80211_SMPS_DYNAMIC:
4099 smps = WMI_PEER_SMPS_DYNAMIC;
4100 break;
4101 case IEEE80211_SMPS_NUM_MODES:
4102 ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
4103 sta->addr, sta->smps_mode);
4104 smps = WMI_PEER_SMPS_PS_NONE;
4105 break;
4106 }
4107
4108 arsta->smps = smps;
4109 }
4110
4111 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
4112 /* FIXME: Not implemented. Probably the only way to do it would
4113 * be to re-assoc the peer. */
4114 changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
4115 ath10k_dbg(ATH10K_DBG_MAC,
4116 "mac sta rc update for %pM: changing supported rates not implemented\n",
4117 sta->addr);
4118 }
4119
4120 arsta->changed |= changed;
4121
4122 spin_unlock_bh(&ar->data_lock);
4123
4124 ieee80211_queue_work(hw, &arsta->update_wk);
4125}
4126
4127static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4128{
4129 /*
4130 * FIXME: Return 0 for time being. Need to figure out whether FW
4131 * has the API to fetch 64-bit local TSF
4132 */
4133
4134 return 0;
3641} 4135}
3642 4136
3643static const struct ieee80211_ops ath10k_ops = { 4137static const struct ieee80211_ops ath10k_ops = {
@@ -3663,6 +4157,9 @@ static const struct ieee80211_ops ath10k_ops = {
3663 .restart_complete = ath10k_restart_complete, 4157 .restart_complete = ath10k_restart_complete,
3664 .get_survey = ath10k_get_survey, 4158 .get_survey = ath10k_get_survey,
3665 .set_bitrate_mask = ath10k_set_bitrate_mask, 4159 .set_bitrate_mask = ath10k_set_bitrate_mask,
4160 .channel_switch_beacon = ath10k_channel_switch_beacon,
4161 .sta_rc_update = ath10k_sta_rc_update,
4162 .get_tsf = ath10k_get_tsf,
3666#ifdef CONFIG_PM 4163#ifdef CONFIG_PM
3667 .suspend = ath10k_suspend, 4164 .suspend = ath10k_suspend,
3668 .resume = ath10k_resume, 4165 .resume = ath10k_resume,
@@ -3939,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
3939 ath10k_get_arvif_iter, 4436 ath10k_get_arvif_iter,
3940 &arvif_iter); 4437 &arvif_iter);
3941 if (!arvif_iter.arvif) { 4438 if (!arvif_iter.arvif) {
3942 ath10k_warn("No VIF found for VDEV: %d\n", vdev_id); 4439 ath10k_warn("No VIF found for vdev %d\n", vdev_id);
3943 return NULL; 4440 return NULL;
3944 } 4441 }
3945 4442
@@ -4020,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
4020 IEEE80211_HW_HAS_RATE_CONTROL | 4517 IEEE80211_HW_HAS_RATE_CONTROL |
4021 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 4518 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
4022 IEEE80211_HW_WANT_MONITOR_VIF | 4519 IEEE80211_HW_WANT_MONITOR_VIF |
4023 IEEE80211_HW_AP_LINK_PS; 4520 IEEE80211_HW_AP_LINK_PS |
4521 IEEE80211_HW_SPECTRUM_MGMT;
4024 4522
4025 /* MSDU can have HTT TX fragment pushed in front. The additional 4 4523 /* MSDU can have HTT TX fragment pushed in front. The additional 4
4026 * bytes is used for padding/alignment if necessary. */ 4524 * bytes is used for padding/alignment if necessary. */
@@ -4038,10 +4536,12 @@ int ath10k_mac_register(struct ath10k *ar)
4038 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 4536 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
4039 4537
4040 ar->hw->vif_data_size = sizeof(struct ath10k_vif); 4538 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
4539 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
4041 4540
4042 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; 4541 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
4043 4542
4044 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 4543 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
4544 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
4045 ar->hw->wiphy->max_remain_on_channel_duration = 5000; 4545 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
4046 4546
4047 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 4547 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -4076,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
4076 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 4576 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
4077 ath10k_reg_notifier); 4577 ath10k_reg_notifier);
4078 if (ret) { 4578 if (ret) {
4079 ath10k_err("Regulatory initialization failed\n"); 4579 ath10k_err("Regulatory initialization failed: %i\n", ret);
4080 goto err_free; 4580 goto err_free;
4081 } 4581 }
4082 4582
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 29fd197d1fd8..9d242d801d9d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,13 +58,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
58static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, 58static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
59 u32 *data); 59 u32 *data);
60 60
61static void ath10k_pci_process_ce(struct ath10k *ar);
62static int ath10k_pci_post_rx(struct ath10k *ar); 61static int ath10k_pci_post_rx(struct ath10k *ar);
63static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, 62static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
64 int num); 63 int num);
65static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); 64static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
66static void ath10k_pci_stop_ce(struct ath10k *ar); 65static int ath10k_pci_cold_reset(struct ath10k *ar);
67static int ath10k_pci_device_reset(struct ath10k *ar); 66static int ath10k_pci_warm_reset(struct ath10k *ar);
68static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 67static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
69static int ath10k_pci_init_irq(struct ath10k *ar); 68static int ath10k_pci_init_irq(struct ath10k *ar);
70static int ath10k_pci_deinit_irq(struct ath10k *ar); 69static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -73,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
73static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 72static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
74 struct ath10k_ce_pipe *rx_pipe, 73 struct ath10k_ce_pipe *rx_pipe,
75 struct bmi_xfer *xfer); 74 struct bmi_xfer *xfer);
76static void ath10k_pci_cleanup_ce(struct ath10k *ar);
77 75
78static const struct ce_attr host_ce_config_wlan[] = { 76static const struct ce_attr host_ce_config_wlan[] = {
79 /* CE0: host->target HTC control and raw streams */ 77 /* CE0: host->target HTC control and raw streams */
@@ -678,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
678 } 676 }
679} 677}
680 678
681/*
682 * FIXME: Handle OOM properly.
683 */
684static inline
685struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
686{
687 struct ath10k_pci_compl *compl = NULL;
688
689 spin_lock_bh(&pipe_info->pipe_lock);
690 if (list_empty(&pipe_info->compl_free)) {
691 ath10k_warn("Completion buffers are full\n");
692 goto exit;
693 }
694 compl = list_first_entry(&pipe_info->compl_free,
695 struct ath10k_pci_compl, list);
696 list_del(&compl->list);
697exit:
698 spin_unlock_bh(&pipe_info->pipe_lock);
699 return compl;
700}
701
702/* Called by lower (CE) layer when a send to Target completes. */ 679/* Called by lower (CE) layer when a send to Target completes. */
703static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) 680static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
704{ 681{
705 struct ath10k *ar = ce_state->ar; 682 struct ath10k *ar = ce_state->ar;
706 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 683 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
707 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 684 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
708 struct ath10k_pci_compl *compl;
709 void *transfer_context; 685 void *transfer_context;
710 u32 ce_data; 686 u32 ce_data;
711 unsigned int nbytes; 687 unsigned int nbytes;
@@ -714,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
714 while (ath10k_ce_completed_send_next(ce_state, &transfer_context, 690 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
715 &ce_data, &nbytes, 691 &ce_data, &nbytes,
716 &transfer_id) == 0) { 692 &transfer_id) == 0) {
717 compl = get_free_compl(pipe_info); 693 /* no need to call tx completion for NULL pointers */
718 if (!compl) 694 if (transfer_context == NULL)
719 break; 695 continue;
720
721 compl->state = ATH10K_PCI_COMPL_SEND;
722 compl->ce_state = ce_state;
723 compl->pipe_info = pipe_info;
724 compl->skb = transfer_context;
725 compl->nbytes = nbytes;
726 compl->transfer_id = transfer_id;
727 compl->flags = 0;
728 696
729 /* 697 cb->tx_completion(ar, transfer_context, transfer_id);
730 * Add the completion to the processing queue.
731 */
732 spin_lock_bh(&ar_pci->compl_lock);
733 list_add_tail(&compl->list, &ar_pci->compl_process);
734 spin_unlock_bh(&ar_pci->compl_lock);
735 } 698 }
736
737 ath10k_pci_process_ce(ar);
738} 699}
739 700
740/* Called by lower (CE) layer when data is received from the Target. */ 701/* Called by lower (CE) layer when data is received from the Target. */
@@ -743,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
743 struct ath10k *ar = ce_state->ar; 704 struct ath10k *ar = ce_state->ar;
744 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 705 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
745 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 706 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
746 struct ath10k_pci_compl *compl; 707 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
747 struct sk_buff *skb; 708 struct sk_buff *skb;
748 void *transfer_context; 709 void *transfer_context;
749 u32 ce_data; 710 u32 ce_data;
750 unsigned int nbytes; 711 unsigned int nbytes, max_nbytes;
751 unsigned int transfer_id; 712 unsigned int transfer_id;
752 unsigned int flags; 713 unsigned int flags;
714 int err;
753 715
754 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 716 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
755 &ce_data, &nbytes, &transfer_id, 717 &ce_data, &nbytes, &transfer_id,
756 &flags) == 0) { 718 &flags) == 0) {
757 compl = get_free_compl(pipe_info); 719 err = ath10k_pci_post_rx_pipe(pipe_info, 1);
758 if (!compl) 720 if (unlikely(err)) {
759 break; 721 /* FIXME: retry */
760 722 ath10k_warn("failed to replenish CE rx ring %d: %d\n",
761 compl->state = ATH10K_PCI_COMPL_RECV; 723 pipe_info->pipe_num, err);
762 compl->ce_state = ce_state; 724 }
763 compl->pipe_info = pipe_info;
764 compl->skb = transfer_context;
765 compl->nbytes = nbytes;
766 compl->transfer_id = transfer_id;
767 compl->flags = flags;
768 725
769 skb = transfer_context; 726 skb = transfer_context;
727 max_nbytes = skb->len + skb_tailroom(skb);
770 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 728 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
771 skb->len + skb_tailroom(skb), 729 max_nbytes, DMA_FROM_DEVICE);
772 DMA_FROM_DEVICE); 730
773 /* 731 if (unlikely(max_nbytes < nbytes)) {
774 * Add the completion to the processing queue. 732 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
775 */ 733 nbytes, max_nbytes);
776 spin_lock_bh(&ar_pci->compl_lock); 734 dev_kfree_skb_any(skb);
777 list_add_tail(&compl->list, &ar_pci->compl_process); 735 continue;
778 spin_unlock_bh(&ar_pci->compl_lock); 736 }
779 }
780 737
781 ath10k_pci_process_ce(ar); 738 skb_put(skb, nbytes);
739 cb->rx_completion(ar, skb, pipe_info->pipe_num);
740 }
782} 741}
783 742
784/* Send the first nbytes bytes of the buffer */ 743static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
785static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, 744 struct ath10k_hif_sg_item *items, int n_items)
786 unsigned int transfer_id,
787 unsigned int bytes, struct sk_buff *nbuf)
788{ 745{
789 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
790 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 746 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
791 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]); 747 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
792 struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl; 748 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
793 unsigned int len; 749 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
794 u32 flags = 0; 750 unsigned int nentries_mask = src_ring->nentries_mask;
795 int ret; 751 unsigned int sw_index = src_ring->sw_index;
752 unsigned int write_index = src_ring->write_index;
753 int err, i;
796 754
797 len = min(bytes, nbuf->len); 755 spin_lock_bh(&ar_pci->ce_lock);
798 bytes -= len;
799 756
800 if (len & 3) 757 if (unlikely(CE_RING_DELTA(nentries_mask,
801 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); 758 write_index, sw_index - 1) < n_items)) {
759 err = -ENOBUFS;
760 goto unlock;
761 }
802 762
803 ath10k_dbg(ATH10K_DBG_PCI, 763 for (i = 0; i < n_items - 1; i++) {
804 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", 764 ath10k_dbg(ATH10K_DBG_PCI,
805 nbuf->data, (unsigned long long) skb_cb->paddr, 765 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
806 nbuf->len, len); 766 i, items[i].paddr, items[i].len, n_items);
807 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, 767 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
808 "ath10k tx: data: ", 768 items[i].vaddr, items[i].len);
809 nbuf->data, nbuf->len);
810
811 ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
812 flags);
813 if (ret)
814 ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
815 769
816 return ret; 770 err = ath10k_ce_send_nolock(ce_pipe,
771 items[i].transfer_context,
772 items[i].paddr,
773 items[i].len,
774 items[i].transfer_id,
775 CE_SEND_FLAG_GATHER);
776 if (err)
777 goto unlock;
778 }
779
780 /* `i` is equal to `n_items -1` after for() */
781
782 ath10k_dbg(ATH10K_DBG_PCI,
783 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
784 i, items[i].paddr, items[i].len, n_items);
785 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
786 items[i].vaddr, items[i].len);
787
788 err = ath10k_ce_send_nolock(ce_pipe,
789 items[i].transfer_context,
790 items[i].paddr,
791 items[i].len,
792 items[i].transfer_id,
793 0);
794 if (err)
795 goto unlock;
796
797 err = 0;
798unlock:
799 spin_unlock_bh(&ar_pci->ce_lock);
800 return err;
817} 801}
818 802
819static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 803static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
@@ -833,9 +817,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
833 ath10k_err("firmware crashed!\n"); 817 ath10k_err("firmware crashed!\n");
834 ath10k_err("hardware name %s version 0x%x\n", 818 ath10k_err("hardware name %s version 0x%x\n",
835 ar->hw_params.name, ar->target_version); 819 ar->hw_params.name, ar->target_version);
836 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major, 820 ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
837 ar->fw_version_minor, ar->fw_version_release,
838 ar->fw_version_build);
839 821
840 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); 822 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
841 ret = ath10k_pci_diag_read_mem(ar, host_addr, 823 ret = ath10k_pci_diag_read_mem(ar, host_addr,
@@ -904,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
904 sizeof(ar_pci->msg_callbacks_current)); 886 sizeof(ar_pci->msg_callbacks_current));
905} 887}
906 888
907static int ath10k_pci_alloc_compl(struct ath10k *ar)
908{
909 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
910 const struct ce_attr *attr;
911 struct ath10k_pci_pipe *pipe_info;
912 struct ath10k_pci_compl *compl;
913 int i, pipe_num, completions;
914
915 spin_lock_init(&ar_pci->compl_lock);
916 INIT_LIST_HEAD(&ar_pci->compl_process);
917
918 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
919 pipe_info = &ar_pci->pipe_info[pipe_num];
920
921 spin_lock_init(&pipe_info->pipe_lock);
922 INIT_LIST_HEAD(&pipe_info->compl_free);
923
924 /* Handle Diagnostic CE specially */
925 if (pipe_info->ce_hdl == ar_pci->ce_diag)
926 continue;
927
928 attr = &host_ce_config_wlan[pipe_num];
929 completions = 0;
930
931 if (attr->src_nentries)
932 completions += attr->src_nentries;
933
934 if (attr->dest_nentries)
935 completions += attr->dest_nentries;
936
937 for (i = 0; i < completions; i++) {
938 compl = kmalloc(sizeof(*compl), GFP_KERNEL);
939 if (!compl) {
940 ath10k_warn("No memory for completion state\n");
941 ath10k_pci_cleanup_ce(ar);
942 return -ENOMEM;
943 }
944
945 compl->state = ATH10K_PCI_COMPL_FREE;
946 list_add_tail(&compl->list, &pipe_info->compl_free);
947 }
948 }
949
950 return 0;
951}
952
953static int ath10k_pci_setup_ce_irq(struct ath10k *ar) 889static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
954{ 890{
955 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 891 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -994,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
994 tasklet_kill(&ar_pci->pipe_info[i].intr); 930 tasklet_kill(&ar_pci->pipe_info[i].intr);
995} 931}
996 932
997static void ath10k_pci_stop_ce(struct ath10k *ar)
998{
999 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1000 struct ath10k_pci_compl *compl;
1001 struct sk_buff *skb;
1002
1003 /* Mark pending completions as aborted, so that upper layers free up
1004 * their associated resources */
1005 spin_lock_bh(&ar_pci->compl_lock);
1006 list_for_each_entry(compl, &ar_pci->compl_process, list) {
1007 skb = compl->skb;
1008 ATH10K_SKB_CB(skb)->is_aborted = true;
1009 }
1010 spin_unlock_bh(&ar_pci->compl_lock);
1011}
1012
1013static void ath10k_pci_cleanup_ce(struct ath10k *ar)
1014{
1015 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1016 struct ath10k_pci_compl *compl, *tmp;
1017 struct ath10k_pci_pipe *pipe_info;
1018 struct sk_buff *netbuf;
1019 int pipe_num;
1020
1021 /* Free pending completions. */
1022 spin_lock_bh(&ar_pci->compl_lock);
1023 if (!list_empty(&ar_pci->compl_process))
1024 ath10k_warn("pending completions still present! possible memory leaks.\n");
1025
1026 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
1027 list_del(&compl->list);
1028 netbuf = compl->skb;
1029 dev_kfree_skb_any(netbuf);
1030 kfree(compl);
1031 }
1032 spin_unlock_bh(&ar_pci->compl_lock);
1033
1034 /* Free unused completions for each pipe. */
1035 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1036 pipe_info = &ar_pci->pipe_info[pipe_num];
1037
1038 spin_lock_bh(&pipe_info->pipe_lock);
1039 list_for_each_entry_safe(compl, tmp,
1040 &pipe_info->compl_free, list) {
1041 list_del(&compl->list);
1042 kfree(compl);
1043 }
1044 spin_unlock_bh(&pipe_info->pipe_lock);
1045 }
1046}
1047
1048static void ath10k_pci_process_ce(struct ath10k *ar)
1049{
1050 struct ath10k_pci *ar_pci = ar->hif.priv;
1051 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1052 struct ath10k_pci_compl *compl;
1053 struct sk_buff *skb;
1054 unsigned int nbytes;
1055 int ret, send_done = 0;
1056
1057 /* Upper layers aren't ready to handle tx/rx completions in parallel so
1058 * we must serialize all completion processing. */
1059
1060 spin_lock_bh(&ar_pci->compl_lock);
1061 if (ar_pci->compl_processing) {
1062 spin_unlock_bh(&ar_pci->compl_lock);
1063 return;
1064 }
1065 ar_pci->compl_processing = true;
1066 spin_unlock_bh(&ar_pci->compl_lock);
1067
1068 for (;;) {
1069 spin_lock_bh(&ar_pci->compl_lock);
1070 if (list_empty(&ar_pci->compl_process)) {
1071 spin_unlock_bh(&ar_pci->compl_lock);
1072 break;
1073 }
1074 compl = list_first_entry(&ar_pci->compl_process,
1075 struct ath10k_pci_compl, list);
1076 list_del(&compl->list);
1077 spin_unlock_bh(&ar_pci->compl_lock);
1078
1079 switch (compl->state) {
1080 case ATH10K_PCI_COMPL_SEND:
1081 cb->tx_completion(ar,
1082 compl->skb,
1083 compl->transfer_id);
1084 send_done = 1;
1085 break;
1086 case ATH10K_PCI_COMPL_RECV:
1087 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
1088 if (ret) {
1089 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1090 compl->pipe_info->pipe_num, ret);
1091 break;
1092 }
1093
1094 skb = compl->skb;
1095 nbytes = compl->nbytes;
1096
1097 ath10k_dbg(ATH10K_DBG_PCI,
1098 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
1099 skb, nbytes);
1100 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
1101 "ath10k rx: ", skb->data, nbytes);
1102
1103 if (skb->len + skb_tailroom(skb) >= nbytes) {
1104 skb_trim(skb, 0);
1105 skb_put(skb, nbytes);
1106 cb->rx_completion(ar, skb,
1107 compl->pipe_info->pipe_num);
1108 } else {
1109 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
1110 nbytes,
1111 skb->len + skb_tailroom(skb));
1112 }
1113 break;
1114 case ATH10K_PCI_COMPL_FREE:
1115 ath10k_warn("free completion cannot be processed\n");
1116 break;
1117 default:
1118 ath10k_warn("invalid completion state (%d)\n",
1119 compl->state);
1120 break;
1121 }
1122
1123 compl->state = ATH10K_PCI_COMPL_FREE;
1124
1125 /*
1126 * Add completion back to the pipe's free list.
1127 */
1128 spin_lock_bh(&compl->pipe_info->pipe_lock);
1129 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
1130 spin_unlock_bh(&compl->pipe_info->pipe_lock);
1131 }
1132
1133 spin_lock_bh(&ar_pci->compl_lock);
1134 ar_pci->compl_processing = false;
1135 spin_unlock_bh(&ar_pci->compl_lock);
1136}
1137
1138/* TODO - temporary mapping while we have too few CE's */ 933/* TODO - temporary mapping while we have too few CE's */
1139static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, 934static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1140 u16 service_id, u8 *ul_pipe, 935 u16 service_id, u8 *ul_pipe,
@@ -1306,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
1306 ath10k_pci_free_early_irq(ar); 1101 ath10k_pci_free_early_irq(ar);
1307 ath10k_pci_kill_tasklet(ar); 1102 ath10k_pci_kill_tasklet(ar);
1308 1103
1309 ret = ath10k_pci_alloc_compl(ar);
1310 if (ret) {
1311 ath10k_warn("failed to allocate CE completions: %d\n", ret);
1312 goto err_early_irq;
1313 }
1314
1315 ret = ath10k_pci_request_irq(ar); 1104 ret = ath10k_pci_request_irq(ar);
1316 if (ret) { 1105 if (ret) {
1317 ath10k_warn("failed to post RX buffers for all pipes: %d\n", 1106 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1318 ret); 1107 ret);
1319 goto err_free_compl; 1108 goto err_early_irq;
1320 } 1109 }
1321 1110
1322 ret = ath10k_pci_setup_ce_irq(ar); 1111 ret = ath10k_pci_setup_ce_irq(ar);
@@ -1340,10 +1129,6 @@ err_stop:
1340 ath10k_ce_disable_interrupts(ar); 1129 ath10k_ce_disable_interrupts(ar);
1341 ath10k_pci_free_irq(ar); 1130 ath10k_pci_free_irq(ar);
1342 ath10k_pci_kill_tasklet(ar); 1131 ath10k_pci_kill_tasklet(ar);
1343 ath10k_pci_stop_ce(ar);
1344 ath10k_pci_process_ce(ar);
1345err_free_compl:
1346 ath10k_pci_cleanup_ce(ar);
1347err_early_irq: 1132err_early_irq:
1348 /* Though there should be no interrupts (device was reset) 1133 /* Though there should be no interrupts (device was reset)
1349 * power_down() expects the early IRQ to be installed as per the 1134 * power_down() expects the early IRQ to be installed as per the
@@ -1414,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1414 1199
1415 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, 1200 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1416 &ce_data, &nbytes, &id) == 0) { 1201 &ce_data, &nbytes, &id) == 0) {
1417 /* 1202 /* no need to call tx completion for NULL pointers */
1418 * Indicate the completion to higer layer to free 1203 if (!netbuf)
1419 * the buffer
1420 */
1421
1422 if (!netbuf) {
1423 ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
1424 ce_hdl->id);
1425 continue; 1204 continue;
1426 }
1427 1205
1428 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1429 ar_pci->msg_callbacks_current.tx_completion(ar, 1206 ar_pci->msg_callbacks_current.tx_completion(ar,
1430 netbuf, 1207 netbuf,
1431 id); 1208 id);
@@ -1483,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1483 1260
1484 ath10k_pci_free_irq(ar); 1261 ath10k_pci_free_irq(ar);
1485 ath10k_pci_kill_tasklet(ar); 1262 ath10k_pci_kill_tasklet(ar);
1486 ath10k_pci_stop_ce(ar);
1487 1263
1488 ret = ath10k_pci_request_early_irq(ar); 1264 ret = ath10k_pci_request_early_irq(ar);
1489 if (ret) 1265 if (ret)
@@ -1493,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1493 * not DMA nor interrupt. We process the leftovers and then free 1269 * not DMA nor interrupt. We process the leftovers and then free
1494 * everything else up. */ 1270 * everything else up. */
1495 1271
1496 ath10k_pci_process_ce(ar);
1497 ath10k_pci_cleanup_ce(ar);
1498 ath10k_pci_buffer_cleanup(ar); 1272 ath10k_pci_buffer_cleanup(ar);
1499 1273
1500 /* Make the sure the device won't access any structures on the host by 1274 /* Make the sure the device won't access any structures on the host by
@@ -1502,7 +1276,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1502 * configuration during init. If ringbuffers are freed and the device 1276 * configuration during init. If ringbuffers are freed and the device
1503 * were to access them this could lead to memory corruption on the 1277 * were to access them this could lead to memory corruption on the
1504 * host. */ 1278 * host. */
1505 ath10k_pci_device_reset(ar); 1279 ath10k_pci_warm_reset(ar);
1506 1280
1507 ar_pci->started = 0; 1281 ar_pci->started = 0;
1508} 1282}
@@ -1993,7 +1767,94 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1993 ath10k_pci_sleep(ar); 1767 ath10k_pci_sleep(ar);
1994} 1768}
1995 1769
1996static int ath10k_pci_hif_power_up(struct ath10k *ar) 1770static int ath10k_pci_warm_reset(struct ath10k *ar)
1771{
1772 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773 int ret = 0;
1774 u32 val;
1775
1776 ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
1777
1778 ret = ath10k_do_pci_wake(ar);
1779 if (ret) {
1780 ath10k_err("failed to wake up target: %d\n", ret);
1781 return ret;
1782 }
1783
1784 /* debug */
1785 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1786 PCIE_INTR_CAUSE_ADDRESS);
1787 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1788
1789 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1790 CPU_INTR_ADDRESS);
1791 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1792 val);
1793
1794 /* disable pending irqs */
1795 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1796 PCIE_INTR_ENABLE_ADDRESS, 0);
1797
1798 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1799 PCIE_INTR_CLR_ADDRESS, ~0);
1800
1801 msleep(100);
1802
1803 /* clear fw indicator */
1804 ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
1805
1806 /* clear target LF timer interrupts */
1807 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1808 SOC_LF_TIMER_CONTROL0_ADDRESS);
1809 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1810 SOC_LF_TIMER_CONTROL0_ADDRESS,
1811 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1812
1813 /* reset CE */
1814 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1815 SOC_RESET_CONTROL_ADDRESS);
1816 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1817 val | SOC_RESET_CONTROL_CE_RST_MASK);
1818 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1819 SOC_RESET_CONTROL_ADDRESS);
1820 msleep(10);
1821
1822 /* unreset CE */
1823 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1824 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1825 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1826 SOC_RESET_CONTROL_ADDRESS);
1827 msleep(10);
1828
1829 /* debug */
1830 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1831 PCIE_INTR_CAUSE_ADDRESS);
1832 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1833
1834 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1835 CPU_INTR_ADDRESS);
1836 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1837 val);
1838
1839 /* CPU warm reset */
1840 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1841 SOC_RESET_CONTROL_ADDRESS);
1842 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1843 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1844
1845 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1846 SOC_RESET_CONTROL_ADDRESS);
1847 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1848
1849 msleep(100);
1850
1851 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1852
1853 ath10k_do_pci_sleep(ar);
1854 return ret;
1855}
1856
1857static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1997{ 1858{
1998 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1859 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1999 const char *irq_mode; 1860 const char *irq_mode;
@@ -2009,7 +1870,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
2009 * is in an unexpected state. We try to catch that here in order to 1870 * is in an unexpected state. We try to catch that here in order to
2010 * reset the Target and retry the probe. 1871 * reset the Target and retry the probe.
2011 */ 1872 */
2012 ret = ath10k_pci_device_reset(ar); 1873 if (cold_reset)
1874 ret = ath10k_pci_cold_reset(ar);
1875 else
1876 ret = ath10k_pci_warm_reset(ar);
1877
2013 if (ret) { 1878 if (ret) {
2014 ath10k_err("failed to reset target: %d\n", ret); 1879 ath10k_err("failed to reset target: %d\n", ret);
2015 goto err; 1880 goto err;
@@ -2079,7 +1944,7 @@ err_deinit_irq:
2079 ath10k_pci_deinit_irq(ar); 1944 ath10k_pci_deinit_irq(ar);
2080err_ce: 1945err_ce:
2081 ath10k_pci_ce_deinit(ar); 1946 ath10k_pci_ce_deinit(ar);
2082 ath10k_pci_device_reset(ar); 1947 ath10k_pci_warm_reset(ar);
2083err_ps: 1948err_ps:
2084 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) 1949 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2085 ath10k_do_pci_sleep(ar); 1950 ath10k_do_pci_sleep(ar);
@@ -2087,6 +1952,34 @@ err:
2087 return ret; 1952 return ret;
2088} 1953}
2089 1954
1955static int ath10k_pci_hif_power_up(struct ath10k *ar)
1956{
1957 int ret;
1958
1959 /*
1960 * Hardware CUS232 version 2 has some issues with cold reset and the
1961 * preferred (and safer) way to perform a device reset is through a
1962 * warm reset.
1963 *
1964 * Warm reset doesn't always work though (notably after a firmware
1965 * crash) so fall back to cold reset if necessary.
1966 */
1967 ret = __ath10k_pci_hif_power_up(ar, false);
1968 if (ret) {
1969 ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
1970 ret);
1971
1972 ret = __ath10k_pci_hif_power_up(ar, true);
1973 if (ret) {
1974 ath10k_err("failed to power up target using cold reset too (%d)\n",
1975 ret);
1976 return ret;
1977 }
1978 }
1979
1980 return 0;
1981}
1982
2090static void ath10k_pci_hif_power_down(struct ath10k *ar) 1983static void ath10k_pci_hif_power_down(struct ath10k *ar)
2091{ 1984{
2092 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1985 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2094,7 +1987,7 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
2094 ath10k_pci_free_early_irq(ar); 1987 ath10k_pci_free_early_irq(ar);
2095 ath10k_pci_kill_tasklet(ar); 1988 ath10k_pci_kill_tasklet(ar);
2096 ath10k_pci_deinit_irq(ar); 1989 ath10k_pci_deinit_irq(ar);
2097 ath10k_pci_device_reset(ar); 1990 ath10k_pci_warm_reset(ar);
2098 1991
2099 ath10k_pci_ce_deinit(ar); 1992 ath10k_pci_ce_deinit(ar);
2100 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) 1993 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2151,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
2151#endif 2044#endif
2152 2045
2153static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 2046static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2154 .send_head = ath10k_pci_hif_send_head, 2047 .tx_sg = ath10k_pci_hif_tx_sg,
2155 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 2048 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2156 .start = ath10k_pci_hif_start, 2049 .start = ath10k_pci_hif_start,
2157 .stop = ath10k_pci_hif_stop, 2050 .stop = ath10k_pci_hif_stop,
@@ -2411,11 +2304,10 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
2411 /* Try MSI-X */ 2304 /* Try MSI-X */
2412 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) { 2305 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2413 ar_pci->num_msi_intrs = MSI_NUM_REQUEST; 2306 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2414 ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs); 2307 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2415 if (ret == 0) 2308 ar_pci->num_msi_intrs);
2416 return 0;
2417 if (ret > 0) 2309 if (ret > 0)
2418 pci_disable_msi(ar_pci->pdev); 2310 return 0;
2419 2311
2420 /* fall-through */ 2312 /* fall-through */
2421 } 2313 }
@@ -2482,6 +2374,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
2482 case MSI_NUM_REQUEST: 2374 case MSI_NUM_REQUEST:
2483 pci_disable_msi(ar_pci->pdev); 2375 pci_disable_msi(ar_pci->pdev);
2484 return 0; 2376 return 0;
2377 default:
2378 pci_disable_msi(ar_pci->pdev);
2485 } 2379 }
2486 2380
2487 ath10k_warn("unknown irq configuration upon deinit\n"); 2381 ath10k_warn("unknown irq configuration upon deinit\n");
@@ -2523,7 +2417,7 @@ out:
2523 return ret; 2417 return ret;
2524} 2418}
2525 2419
2526static int ath10k_pci_device_reset(struct ath10k *ar) 2420static int ath10k_pci_cold_reset(struct ath10k *ar)
2527{ 2421{
2528 int i, ret; 2422 int i, ret;
2529 u32 val; 2423 u32 val;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index a4f32038c440..b43fdb4f7319 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,23 +43,6 @@ struct bmi_xfer {
43 u32 resp_len; 43 u32 resp_len;
44}; 44};
45 45
46enum ath10k_pci_compl_state {
47 ATH10K_PCI_COMPL_FREE = 0,
48 ATH10K_PCI_COMPL_SEND,
49 ATH10K_PCI_COMPL_RECV,
50};
51
52struct ath10k_pci_compl {
53 struct list_head list;
54 enum ath10k_pci_compl_state state;
55 struct ath10k_ce_pipe *ce_state;
56 struct ath10k_pci_pipe *pipe_info;
57 struct sk_buff *skb;
58 unsigned int nbytes;
59 unsigned int transfer_id;
60 unsigned int flags;
61};
62
63/* 46/*
64 * PCI-specific Target state 47 * PCI-specific Target state
65 * 48 *
@@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
175 /* protects compl_free and num_send_allowed */ 158 /* protects compl_free and num_send_allowed */
176 spinlock_t pipe_lock; 159 spinlock_t pipe_lock;
177 160
178 /* List of free CE completion slots */
179 struct list_head compl_free;
180
181 struct ath10k_pci *ar_pci; 161 struct ath10k_pci *ar_pci;
182 struct tasklet_struct intr; 162 struct tasklet_struct intr;
183}; 163};
@@ -205,14 +185,6 @@ struct ath10k_pci {
205 atomic_t keep_awake_count; 185 atomic_t keep_awake_count;
206 bool verified_awake; 186 bool verified_awake;
207 187
208 /* List of CE completions to be processed */
209 struct list_head compl_process;
210
211 /* protects compl_processing and compl_process */
212 spinlock_t compl_lock;
213
214 bool compl_processing;
215
216 struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; 188 struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
217 189
218 struct ath10k_hif_cb msg_callbacks_current; 190 struct ath10k_hif_cb msg_callbacks_current;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 74f45fa6f428..0541dd939ce9 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
51 struct ieee80211_tx_info *info; 51 struct ieee80211_tx_info *info;
52 struct ath10k_skb_cb *skb_cb; 52 struct ath10k_skb_cb *skb_cb;
53 struct sk_buff *msdu; 53 struct sk_buff *msdu;
54 int ret; 54
55 lockdep_assert_held(&htt->tx_lock);
55 56
56 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", 57 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
57 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); 58 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
65 msdu = htt->pending_tx[tx_done->msdu_id]; 66 msdu = htt->pending_tx[tx_done->msdu_id];
66 skb_cb = ATH10K_SKB_CB(msdu); 67 skb_cb = ATH10K_SKB_CB(msdu);
67 68
68 ret = ath10k_skb_unmap(dev, msdu); 69 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
69 if (ret)
70 ath10k_warn("data skb unmap failed (%d)\n", ret);
71 70
72 if (skb_cb->htt.frag_len) 71 if (skb_cb->htt.txbuf)
73 skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); 72 dma_pool_free(htt->tx_pool,
73 skb_cb->htt.txbuf,
74 skb_cb->htt.txbuf_paddr);
74 75
75 ath10k_report_offchan_tx(htt->ar, msdu); 76 ath10k_report_offchan_tx(htt->ar, msdu);
76 77
@@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
92 /* we do not own the msdu anymore */ 93 /* we do not own the msdu anymore */
93 94
94exit: 95exit:
95 spin_lock_bh(&htt->tx_lock);
96 htt->pending_tx[tx_done->msdu_id] = NULL; 96 htt->pending_tx[tx_done->msdu_id] = NULL;
97 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); 97 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
98 __ath10k_htt_tx_dec_pending(htt); 98 __ath10k_htt_tx_dec_pending(htt);
99 if (htt->num_pending_tx == 0) 99 if (htt->num_pending_tx == 0)
100 wake_up(&htt->empty_tx_wq); 100 wake_up(&htt->empty_tx_wq);
101 spin_unlock_bh(&htt->tx_lock);
102} 101}
103 102
104static const u8 rx_legacy_rate_idx[] = { 103static const u8 rx_legacy_rate_idx[] = {
@@ -204,7 +203,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
204 break; 203 break;
205 /* 80MHZ */ 204 /* 80MHZ */
206 case 2: 205 case 2:
207 status->flag |= RX_FLAG_80MHZ; 206 status->vht_flag |= RX_VHT_FLAG_80MHZ;
208 } 207 }
209 208
210 status->flag |= RX_FLAG_VHT; 209 status->flag |= RX_FLAG_VHT;
@@ -258,20 +257,26 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
258 status->band = ch->band; 257 status->band = ch->band;
259 status->freq = ch->center_freq; 258 status->freq = ch->center_freq;
260 259
260 if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
261 /* TSF available only in 32-bit */
262 status->mactime = info->tsf & 0xffffffff;
263 status->flag |= RX_FLAG_MACTIME_END;
264 }
265
261 ath10k_dbg(ATH10K_DBG_DATA, 266 ath10k_dbg(ATH10K_DBG_DATA,
262 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n", 267 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
263 info->skb, 268 info->skb,
264 info->skb->len, 269 info->skb->len,
265 status->flag == 0 ? "legacy" : "", 270 status->flag == 0 ? "legacy" : "",
266 status->flag & RX_FLAG_HT ? "ht" : "", 271 status->flag & RX_FLAG_HT ? "ht" : "",
267 status->flag & RX_FLAG_VHT ? "vht" : "", 272 status->flag & RX_FLAG_VHT ? "vht" : "",
268 status->flag & RX_FLAG_40MHZ ? "40" : "", 273 status->flag & RX_FLAG_40MHZ ? "40" : "",
269 status->flag & RX_FLAG_80MHZ ? "80" : "", 274 status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
270 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "", 275 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
271 status->rate_idx, 276 status->rate_idx,
272 status->vht_nss, 277 status->vht_nss,
273 status->freq, 278 status->freq,
274 status->band); 279 status->band, status->flag, info->fcs_err);
275 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 280 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
276 info->skb->data, info->skb->len); 281 info->skb->data, info->skb->len);
277 282
@@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
378 spin_lock_bh(&ar->data_lock); 383 spin_lock_bh(&ar->data_lock);
379 peer = ath10k_peer_find_by_id(ar, ev->peer_id); 384 peer = ath10k_peer_find_by_id(ar, ev->peer_id);
380 if (!peer) { 385 if (!peer) {
381 ath10k_warn("unknown peer id %d\n", ev->peer_id); 386 ath10k_warn("peer-unmap-event: unknown peer id %d\n",
387 ev->peer_id);
382 goto exit; 388 goto exit;
383 } 389 }
384 390
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 712a606a080a..cb1f7b5bcf4c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -213,7 +213,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
213 .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE, 213 .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
214 .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE, 214 .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
215 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, 215 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
216 .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED, 216 .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
217 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, 217 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
218 .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, 218 .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
219 .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, 219 .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
@@ -420,7 +420,6 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
420 .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, 420 .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
421 .pmf_qos = WMI_PDEV_PARAM_PMF_QOS, 421 .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
422 .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 422 .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
423 .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
424 .dcs = WMI_PDEV_PARAM_DCS, 423 .dcs = WMI_PDEV_PARAM_DCS,
425 .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE, 424 .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
426 .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD, 425 .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -472,8 +471,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
472 .bcnflt_stats_update_period = 471 .bcnflt_stats_update_period =
473 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, 472 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
474 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS, 473 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
475 .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, 474 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
476 .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
477 .dcs = WMI_10X_PDEV_PARAM_DCS, 475 .dcs = WMI_10X_PDEV_PARAM_DCS,
478 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE, 476 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
479 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, 477 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -561,7 +559,6 @@ err_pull:
561 559
562static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) 560static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
563{ 561{
564 struct wmi_bcn_tx_arg arg = {0};
565 int ret; 562 int ret;
566 563
567 lockdep_assert_held(&arvif->ar->data_lock); 564 lockdep_assert_held(&arvif->ar->data_lock);
@@ -569,18 +566,16 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
569 if (arvif->beacon == NULL) 566 if (arvif->beacon == NULL)
570 return; 567 return;
571 568
572 arg.vdev_id = arvif->vdev_id; 569 if (arvif->beacon_sent)
573 arg.tx_rate = 0; 570 return;
574 arg.tx_power = 0;
575 arg.bcn = arvif->beacon->data;
576 arg.bcn_len = arvif->beacon->len;
577 571
578 ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg); 572 ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
579 if (ret) 573 if (ret)
580 return; 574 return;
581 575
582 dev_kfree_skb_any(arvif->beacon); 576 /* We need to retain the arvif->beacon reference for DMA unmapping and
583 arvif->beacon = NULL; 577 * freeing the skbuff later. */
578 arvif->beacon_sent = true;
584} 579}
585 580
586static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, 581static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
@@ -1116,7 +1111,27 @@ static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
1116static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, 1111static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
1117 struct sk_buff *skb) 1112 struct sk_buff *skb)
1118{ 1113{
1119 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n"); 1114 struct wmi_peer_sta_kickout_event *ev;
1115 struct ieee80211_sta *sta;
1116
1117 ev = (struct wmi_peer_sta_kickout_event *)skb->data;
1118
1119 ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
1120 ev->peer_macaddr.addr);
1121
1122 rcu_read_lock();
1123
1124 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
1125 if (!sta) {
1126 ath10k_warn("Spurious quick kickout for STA %pM\n",
1127 ev->peer_macaddr.addr);
1128 goto exit;
1129 }
1130
1131 ieee80211_report_low_ack(sta, 10);
1132
1133exit:
1134 rcu_read_unlock();
1120} 1135}
1121 1136
1122/* 1137/*
@@ -1217,6 +1232,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1217 tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast); 1232 tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
1218 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); 1233 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
1219 1234
1235 if (tim->dtim_count == 0) {
1236 ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
1237
1238 if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
1239 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
1240 }
1241
1220 ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", 1242 ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
1221 tim->dtim_count, tim->dtim_period, 1243 tim->dtim_count, tim->dtim_period,
1222 tim->bitmap_ctrl, pvm_len); 1244 tim->bitmap_ctrl, pvm_len);
@@ -1338,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1338 struct wmi_bcn_info *bcn_info; 1360 struct wmi_bcn_info *bcn_info;
1339 struct ath10k_vif *arvif; 1361 struct ath10k_vif *arvif;
1340 struct sk_buff *bcn; 1362 struct sk_buff *bcn;
1341 int vdev_id = 0; 1363 int ret, vdev_id = 0;
1342 1364
1343 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); 1365 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
1344 1366
@@ -1385,6 +1407,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1385 continue; 1407 continue;
1386 } 1408 }
1387 1409
1410 /* There are no completions for beacons so wait for next SWBA
1411 * before telling mac80211 to decrement CSA counter
1412 *
1413 * Once CSA counter is completed stop sending beacons until
1414 * actual channel switch is done */
1415 if (arvif->vif->csa_active &&
1416 ieee80211_csa_is_complete(arvif->vif)) {
1417 ieee80211_csa_finish(arvif->vif);
1418 continue;
1419 }
1420
1388 bcn = ieee80211_beacon_get(ar->hw, arvif->vif); 1421 bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
1389 if (!bcn) { 1422 if (!bcn) {
1390 ath10k_warn("could not get mac80211 beacon\n"); 1423 ath10k_warn("could not get mac80211 beacon\n");
@@ -1396,15 +1429,33 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1396 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); 1429 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
1397 1430
1398 spin_lock_bh(&ar->data_lock); 1431 spin_lock_bh(&ar->data_lock);
1432
1399 if (arvif->beacon) { 1433 if (arvif->beacon) {
1400 ath10k_warn("SWBA overrun on vdev %d\n", 1434 if (!arvif->beacon_sent)
1401 arvif->vdev_id); 1435 ath10k_warn("SWBA overrun on vdev %d\n",
1436 arvif->vdev_id);
1437
1438 dma_unmap_single(arvif->ar->dev,
1439 ATH10K_SKB_CB(arvif->beacon)->paddr,
1440 arvif->beacon->len, DMA_TO_DEVICE);
1402 dev_kfree_skb_any(arvif->beacon); 1441 dev_kfree_skb_any(arvif->beacon);
1403 } 1442 }
1404 1443
1444 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
1445 bcn->data, bcn->len,
1446 DMA_TO_DEVICE);
1447 ret = dma_mapping_error(arvif->ar->dev,
1448 ATH10K_SKB_CB(bcn)->paddr);
1449 if (ret) {
1450 ath10k_warn("failed to map beacon: %d\n", ret);
1451 goto skip;
1452 }
1453
1405 arvif->beacon = bcn; 1454 arvif->beacon = bcn;
1455 arvif->beacon_sent = false;
1406 1456
1407 ath10k_wmi_tx_beacon_nowait(arvif); 1457 ath10k_wmi_tx_beacon_nowait(arvif);
1458skip:
1408 spin_unlock_bh(&ar->data_lock); 1459 spin_unlock_bh(&ar->data_lock);
1409 } 1460 }
1410} 1461}
@@ -2031,11 +2082,11 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
2031 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); 2082 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
2032 2083
2033 ath10k_dbg(ATH10K_DBG_WMI, 2084 ath10k_dbg(ATH10K_DBG_WMI,
2034 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n", 2085 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
2035 __le32_to_cpu(ev->sw_version), 2086 __le32_to_cpu(ev->sw_version),
2036 __le32_to_cpu(ev->abi_version), 2087 __le32_to_cpu(ev->abi_version),
2037 ev->mac_addr.addr, 2088 ev->mac_addr.addr,
2038 __le32_to_cpu(ev->status)); 2089 __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
2039 2090
2040 complete(&ar->wmi.unified_ready); 2091 complete(&ar->wmi.unified_ready);
2041 return 0; 2092 return 0;
@@ -2403,7 +2454,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2403 ar->wmi.cmd->pdev_set_channel_cmdid); 2454 ar->wmi.cmd->pdev_set_channel_cmdid);
2404} 2455}
2405 2456
2406int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) 2457int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
2407{ 2458{
2408 struct wmi_pdev_suspend_cmd *cmd; 2459 struct wmi_pdev_suspend_cmd *cmd;
2409 struct sk_buff *skb; 2460 struct sk_buff *skb;
@@ -2413,7 +2464,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
2413 return -ENOMEM; 2464 return -ENOMEM;
2414 2465
2415 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 2466 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
2416 cmd->suspend_opt = WMI_PDEV_SUSPEND; 2467 cmd->suspend_opt = __cpu_to_le32(suspend_opt);
2417 2468
2418 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); 2469 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
2419} 2470}
@@ -3342,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3342 ci->max_power = ch->max_power; 3393 ci->max_power = ch->max_power;
3343 ci->reg_power = ch->max_reg_power; 3394 ci->reg_power = ch->max_reg_power;
3344 ci->antenna_max = ch->max_antenna_gain; 3395 ci->antenna_max = ch->max_antenna_gain;
3345 ci->antenna_max = 0;
3346 3396
3347 /* mode & flags share storage */ 3397 /* mode & flags share storage */
3348 ci->mode = ch->mode; 3398 ci->mode = ch->mode;
@@ -3411,25 +3461,41 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
3411 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); 3461 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
3412} 3462}
3413 3463
3414int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, 3464/* This function assumes the beacon is already DMA mapped */
3415 const struct wmi_bcn_tx_arg *arg) 3465int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
3416{ 3466{
3417 struct wmi_bcn_tx_cmd *cmd; 3467 struct wmi_bcn_tx_ref_cmd *cmd;
3418 struct sk_buff *skb; 3468 struct sk_buff *skb;
3469 struct sk_buff *beacon = arvif->beacon;
3470 struct ath10k *ar = arvif->ar;
3471 struct ieee80211_hdr *hdr;
3419 int ret; 3472 int ret;
3473 u16 fc;
3420 3474
3421 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); 3475 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
3422 if (!skb) 3476 if (!skb)
3423 return -ENOMEM; 3477 return -ENOMEM;
3424 3478
3425 cmd = (struct wmi_bcn_tx_cmd *)skb->data; 3479 hdr = (struct ieee80211_hdr *)beacon->data;
3426 cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id); 3480 fc = le16_to_cpu(hdr->frame_control);
3427 cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate); 3481
3428 cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power); 3482 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
3429 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); 3483 cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
3430 memcpy(cmd->bcn, arg->bcn, arg->bcn_len); 3484 cmd->data_len = __cpu_to_le32(beacon->len);
3485 cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
3486 cmd->msdu_id = 0;
3487 cmd->frame_control = __cpu_to_le32(fc);
3488 cmd->flags = 0;
3489
3490 if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
3491 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
3492
3493 if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
3494 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
3495
3496 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
3497 ar->wmi.cmd->pdev_send_bcn_cmdid);
3431 3498
3432 ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
3433 if (ret) 3499 if (ret)
3434 dev_kfree_skb(skb); 3500 dev_kfree_skb(skb);
3435 3501
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4b5e7d3d32b6..4fcc96aa9513 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2277,7 +2277,6 @@ struct wmi_pdev_param_map {
2277 u32 bcnflt_stats_update_period; 2277 u32 bcnflt_stats_update_period;
2278 u32 pmf_qos; 2278 u32 pmf_qos;
2279 u32 arp_ac_override; 2279 u32 arp_ac_override;
2280 u32 arpdhcp_ac_override;
2281 u32 dcs; 2280 u32 dcs;
2282 u32 ani_enable; 2281 u32 ani_enable;
2283 u32 ani_poll_period; 2282 u32 ani_poll_period;
@@ -3403,6 +3402,24 @@ struct wmi_bcn_tx_arg {
3403 const void *bcn; 3402 const void *bcn;
3404}; 3403};
3405 3404
3405enum wmi_bcn_tx_ref_flags {
3406 WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
3407 WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
3408};
3409
3410struct wmi_bcn_tx_ref_cmd {
3411 __le32 vdev_id;
3412 __le32 data_len;
3413 /* physical address of the frame - dma pointer */
3414 __le32 data_ptr;
3415 /* id for host to track */
3416 __le32 msdu_id;
3417 /* frame ctrl to setup PPDU desc */
3418 __le32 frame_control;
3419 /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
3420 __le32 flags;
3421} __packed;
3422
3406/* Beacon filter */ 3423/* Beacon filter */
3407#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */ 3424#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
3408#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */ 3425#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
@@ -3859,6 +3876,12 @@ enum wmi_peer_smps_state {
3859 WMI_PEER_SMPS_DYNAMIC = 0x2 3876 WMI_PEER_SMPS_DYNAMIC = 0x2
3860}; 3877};
3861 3878
3879enum wmi_peer_chwidth {
3880 WMI_PEER_CHWIDTH_20MHZ = 0,
3881 WMI_PEER_CHWIDTH_40MHZ = 1,
3882 WMI_PEER_CHWIDTH_80MHZ = 2,
3883};
3884
3862enum wmi_peer_param { 3885enum wmi_peer_param {
3863 WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */ 3886 WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
3864 WMI_PEER_AMPDU = 0x2, 3887 WMI_PEER_AMPDU = 0x2,
@@ -4039,6 +4062,10 @@ struct wmi_chan_info_event {
4039 __le32 cycle_count; 4062 __le32 cycle_count;
4040} __packed; 4063} __packed;
4041 4064
4065struct wmi_peer_sta_kickout_event {
4066 struct wmi_mac_addr peer_macaddr;
4067} __packed;
4068
4042#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0) 4069#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
4043 4070
4044/* FIXME: empirically extrapolated */ 4071/* FIXME: empirically extrapolated */
@@ -4172,7 +4199,7 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
4172int ath10k_wmi_connect_htc_service(struct ath10k *ar); 4199int ath10k_wmi_connect_htc_service(struct ath10k *ar);
4173int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 4200int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
4174 const struct wmi_channel_arg *); 4201 const struct wmi_channel_arg *);
4175int ath10k_wmi_pdev_suspend_target(struct ath10k *ar); 4202int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
4176int ath10k_wmi_pdev_resume_target(struct ath10k *ar); 4203int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
4177int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 4204int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
4178 u16 rd5g, u16 ctl2g, u16 ctl5g); 4205 u16 rd5g, u16 ctl2g, u16 ctl5g);
@@ -4219,8 +4246,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
4219 enum wmi_ap_ps_peer_param param_id, u32 value); 4246 enum wmi_ap_ps_peer_param param_id, u32 value);
4220int ath10k_wmi_scan_chan_list(struct ath10k *ar, 4247int ath10k_wmi_scan_chan_list(struct ath10k *ar,
4221 const struct wmi_scan_chan_list_arg *arg); 4248 const struct wmi_scan_chan_list_arg *arg);
4222int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, 4249int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
4223 const struct wmi_bcn_tx_arg *arg);
4224int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, 4250int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
4225 const struct wmi_pdev_set_wmm_params_arg *arg); 4251 const struct wmi_pdev_set_wmm_params_arg *arg);
4226int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); 4252int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ef35da84f63b..4b18434ba697 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -751,6 +751,9 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
751 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len, 751 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
752 DMA_TO_DEVICE); 752 DMA_TO_DEVICE);
753 753
754 if (dma_mapping_error(ah->dev, bf->skbaddr))
755 return -ENOSPC;
756
754 ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates, 757 ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
755 ARRAY_SIZE(bf->rates)); 758 ARRAY_SIZE(bf->rates));
756 759
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 4ee01f654235..afb23b3cc7be 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -681,6 +681,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
681 survey->channel = conf->chandef.chan; 681 survey->channel = conf->chandef.chan;
682 survey->noise = ah->ah_noise_floor; 682 survey->noise = ah->ah_noise_floor;
683 survey->filled = SURVEY_INFO_NOISE_DBM | 683 survey->filled = SURVEY_INFO_NOISE_DBM |
684 SURVEY_INFO_IN_USE |
684 SURVEY_INFO_CHANNEL_TIME | 685 SURVEY_INFO_CHANNEL_TIME |
685 SURVEY_INFO_CHANNEL_TIME_BUSY | 686 SURVEY_INFO_CHANNEL_TIME_BUSY |
686 SURVEY_INFO_CHANNEL_TIME_RX | 687 SURVEY_INFO_CHANNEL_TIME_RX |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index fd4c89df67e1..c2c6f4604958 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
790 if (nw_type & ADHOC_NETWORK) { 790 if (nw_type & ADHOC_NETWORK) {
791 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n", 791 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
792 nw_type & ADHOC_CREATOR ? "creator" : "joiner"); 792 nw_type & ADHOC_CREATOR ? "creator" : "joiner");
793 cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL); 793 cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
794 cfg80211_put_bss(ar->wiphy, bss); 794 cfg80211_put_bss(ar->wiphy, bss);
795 return; 795 return;
796 } 796 }
@@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
861 } 861 }
862 862
863 if (vif->nw_type & ADHOC_NETWORK) { 863 if (vif->nw_type & ADHOC_NETWORK) {
864 if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) { 864 if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
865 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 865 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
866 "%s: ath6k not in ibss mode\n", __func__); 866 "%s: ath6k not in ibss mode\n", __func__);
867 return;
868 }
869 memset(bssid, 0, ETH_ALEN);
870 cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
871 return; 867 return;
872 } 868 }
873 869
@@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3256 struct ath6kl_vif *vif = netdev_priv(dev); 3252 struct ath6kl_vif *vif = netdev_priv(dev);
3257 u16 interval; 3253 u16 interval;
3258 int ret, rssi_thold; 3254 int ret, rssi_thold;
3255 int n_match_sets = request->n_match_sets;
3256
3257 /*
3258 * If there's a matchset w/o an SSID, then assume it's just for
3259 * the RSSI (nothing else is currently supported) and ignore it.
3260 * The device only supports a global RSSI filter that we set below.
3261 */
3262 if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
3263 n_match_sets = 0;
3259 3264
3260 if (ar->state != ATH6KL_STATE_ON) 3265 if (ar->state != ATH6KL_STATE_ON)
3261 return -EIO; 3266 return -EIO;
@@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3268 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, 3273 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
3269 request->n_ssids, 3274 request->n_ssids,
3270 request->match_sets, 3275 request->match_sets,
3271 request->n_match_sets); 3276 n_match_sets);
3272 if (ret < 0) 3277 if (ret < 0)
3273 return ret; 3278 return ret;
3274 3279
3275 if (!request->n_match_sets) { 3280 if (!n_match_sets) {
3276 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, 3281 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
3277 ALL_BSS_FILTER, 0); 3282 ALL_BSS_FILTER, 0);
3278 if (ret < 0) 3283 if (ret < 0)
@@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3286 3291
3287 if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, 3292 if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
3288 ar->fw_capabilities)) { 3293 ar->fw_capabilities)) {
3289 if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF) 3294 if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
3290 rssi_thold = 0; 3295 rssi_thold = 0;
3291 else if (request->rssi_thold < -127) 3296 else if (request->min_rssi_thold < -127)
3292 rssi_thold = -127; 3297 rssi_thold = -127;
3293 else 3298 else
3294 rssi_thold = request->rssi_thold; 3299 rssi_thold = request->min_rssi_thold;
3295 3300
3296 ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx, 3301 ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
3297 rssi_thold); 3302 rssi_thold);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index f38ff6a6255e..56c3fd5cef65 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -24,7 +24,7 @@
24/* constants */ 24/* constants */
25#define TX_URB_COUNT 32 25#define TX_URB_COUNT 32
26#define RX_URB_COUNT 32 26#define RX_URB_COUNT 32
27#define ATH6KL_USB_RX_BUFFER_SIZE 1700 27#define ATH6KL_USB_RX_BUFFER_SIZE 4096
28 28
29/* tx/rx pipes for usb */ 29/* tx/rx pipes for usb */
30enum ATH6KL_USB_PIPE_ID { 30enum ATH6KL_USB_PIPE_ID {
@@ -481,8 +481,8 @@ static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
481 * ATH6KL_USB_RX_BUFFER_SIZE); 481 * ATH6KL_USB_RX_BUFFER_SIZE);
482 */ 482 */
483 483
484 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 484 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
485 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2; 485
486 ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA], 486 ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
487 ATH6KL_USB_RX_BUFFER_SIZE); 487 ATH6KL_USB_RX_BUFFER_SIZE);
488} 488}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 4f16d79c9eb1..8b4ce28e3ce8 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -914,7 +914,7 @@ ath6kl_get_regpair(u16 regdmn)
914 return NULL; 914 return NULL;
915 915
916 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) { 916 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
917 if (regDomainPairs[i].regDmnEnum == regdmn) 917 if (regDomainPairs[i].reg_domain == regdmn)
918 return &regDomainPairs[i]; 918 return &regDomainPairs[i];
919 } 919 }
920 920
@@ -954,7 +954,7 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
954 country = ath6kl_regd_find_country_by_rd((u16) reg_code); 954 country = ath6kl_regd_find_country_by_rd((u16) reg_code);
955 if (regpair) 955 if (regpair)
956 ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n", 956 ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
957 regpair->regDmnEnum); 957 regpair->reg_domain);
958 else 958 else
959 ath6kl_warn("Regpair not found reg_code 0x%0x\n", 959 ath6kl_warn("Regpair not found reg_code 0x%0x\n",
960 reg_code); 960 reg_code);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b96b3e5712d..8fcc029a76a6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -120,18 +120,6 @@ config ATH9K_WOW
120 This option enables Wake on Wireless LAN support for certain cards. 120 This option enables Wake on Wireless LAN support for certain cards.
121 Currently, AR9462 is supported. 121 Currently, AR9462 is supported.
122 122
123config ATH9K_LEGACY_RATE_CONTROL
124 bool "Atheros ath9k rate control"
125 depends on ATH9K
126 default n
127 ---help---
128 Say Y, if you want to use the ath9k specific rate control
129 module instead of minstrel_ht. Be warned that there are various
130 issues with the ath9k RC and minstrel is a more robust algorithm.
131 Note that even if this option is selected, "ath9k_rate_control"
132 has to be passed to mac80211 using the module parameter,
133 ieee80211_default_rc_algo.
134
135config ATH9K_RFKILL 123config ATH9K_RFKILL
136 bool "Atheros ath9k rfkill support" if EXPERT 124 bool "Atheros ath9k rfkill support" if EXPERT
137 depends on ATH9K 125 depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index a40e5c5d7418..8e1c7b0fe76c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,6 @@ ath9k-y += beacon.o \
8 antenna.o 8 antenna.o
9 9
10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o 10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
11ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
12ath9k-$(CONFIG_ATH9K_PCI) += pci.o 11ath9k-$(CONFIG_ATH9K_PCI) += pci.o
13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 12ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
14ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o 13ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
@@ -52,7 +51,9 @@ ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
52obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o 51obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
53 52
54obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o 53obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
55ath9k_common-y:= common.o 54ath9k_common-y:= common.o \
55 common-init.o \
56 common-beacon.o
56 57
57ath9k_htc-y += htc_hst.o \ 58ath9k_htc-y += htc_hst.o \
58 hif_usb.o \ 59 hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2dff2765769b..a0398fe3eb28 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -39,6 +39,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
39 .name = "qca955x_wmac", 39 .name = "qca955x_wmac",
40 .driver_data = AR9300_DEVID_QCA955X, 40 .driver_data = AR9300_DEVID_QCA955X,
41 }, 41 },
42 {
43 .name = "qca953x_wmac",
44 .driver_data = AR9300_DEVID_AR953X,
45 },
42 {}, 46 {},
43}; 47};
44 48
@@ -82,6 +86,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
82 int irq; 86 int irq;
83 int ret = 0; 87 int ret = 0;
84 struct ath_hw *ah; 88 struct ath_hw *ah;
89 struct ath_common *common;
85 char hw_name[64]; 90 char hw_name[64];
86 91
87 if (!dev_get_platdata(&pdev->dev)) { 92 if (!dev_get_platdata(&pdev->dev)) {
@@ -124,9 +129,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
124 sc->mem = mem; 129 sc->mem = mem;
125 sc->irq = irq; 130 sc->irq = irq;
126 131
127 /* Will be cleared in ath9k_start() */
128 set_bit(SC_OP_INVALID, &sc->sc_flags);
129
130 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); 132 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
131 if (ret) { 133 if (ret) {
132 dev_err(&pdev->dev, "request_irq failed\n"); 134 dev_err(&pdev->dev, "request_irq failed\n");
@@ -144,6 +146,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
144 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 146 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
145 hw_name, (unsigned long)mem, irq); 147 hw_name, (unsigned long)mem, irq);
146 148
149 common = ath9k_hw_common(sc->sc_ah);
150 /* Will be cleared in ath9k_start() */
151 set_bit(ATH_OP_INVALID, &common->op_flags);
147 return 0; 152 return 0;
148 153
149 err_irq: 154 err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index d28923b7435b..6d47783f2e5b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -176,16 +176,26 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
176 if (ah->opmode == NL80211_IFTYPE_STATION && 176 if (ah->opmode == NL80211_IFTYPE_STATION &&
177 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH) 177 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
178 weak_sig = true; 178 weak_sig = true;
179
180 /* 179 /*
181 * OFDM Weak signal detection is always enabled for AP mode. 180 * Newer chipsets are better at dealing with high PHY error counts -
181 * keep weak signal detection enabled when no RSSI threshold is
182 * available to determine if it is needed (mode != STA)
182 */ 183 */
183 if (ah->opmode != NL80211_IFTYPE_AP && 184 else if (AR_SREV_9300_20_OR_LATER(ah) &&
184 aniState->ofdmWeakSigDetect != weak_sig) { 185 ah->opmode != NL80211_IFTYPE_STATION)
185 ath9k_hw_ani_control(ah, 186 weak_sig = true;
186 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 187
187 entry_ofdm->ofdm_weak_signal_on); 188 /* Older chipsets are more sensitive to high PHY error counts */
188 } 189 else if (!AR_SREV_9300_20_OR_LATER(ah) &&
190 aniState->ofdmNoiseImmunityLevel >= 8)
191 weak_sig = false;
192
193 if (aniState->ofdmWeakSigDetect != weak_sig)
194 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
195 weak_sig);
196
197 if (!AR_SREV_9300_20_OR_LATER(ah))
198 return;
189 199
190 if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) { 200 if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
191 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; 201 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -308,17 +318,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
308 BUG_ON(aniState == NULL); 318 BUG_ON(aniState == NULL);
309 ah->stats.ast_ani_reset++; 319 ah->stats.ast_ani_reset++;
310 320
311 /* only allow a subset of functions in AP mode */
312 if (ah->opmode == NL80211_IFTYPE_AP) {
313 if (IS_CHAN_2GHZ(chan)) {
314 ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
315 ATH9K_ANI_FIRSTEP_LEVEL);
316 if (AR_SREV_9300_20_OR_LATER(ah))
317 ah->ani_function |= ATH9K_ANI_MRC_CCK;
318 } else
319 ah->ani_function = 0;
320 }
321
322 ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL, 321 ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
323 aniState->ofdmNoiseImmunityLevel); 322 aniState->ofdmNoiseImmunityLevel);
324 cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL, 323 cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
@@ -483,10 +482,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
483 482
484 ath_dbg(common, ANI, "Initialize ANI\n"); 483 ath_dbg(common, ANI, "Initialize ANI\n");
485 484
486 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; 485 if (AR_SREV_9300_20_OR_LATER(ah)) {
487 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW; 486 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
488 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH; 487 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
489 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW; 488 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
489 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
490 } else {
491 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
492 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
493 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
494 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
495 }
490 496
491 ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; 497 ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
492 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; 498 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 21e7b83c3f6a..c40965b4c1e2 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -22,12 +22,16 @@
22/* units are errors per second */ 22/* units are errors per second */
23#define ATH9K_ANI_OFDM_TRIG_HIGH 3500 23#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
24#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 24#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
25#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
25 26
26#define ATH9K_ANI_OFDM_TRIG_LOW 400 27#define ATH9K_ANI_OFDM_TRIG_LOW 400
27#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 28#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
29#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
28 30
29#define ATH9K_ANI_CCK_TRIG_HIGH 600 31#define ATH9K_ANI_CCK_TRIG_HIGH 600
32#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
30#define ATH9K_ANI_CCK_TRIG_LOW 300 33#define ATH9K_ANI_CCK_TRIG_LOW 300
34#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
31 35
32#define ATH9K_ANI_SPUR_IMMUNE_LVL 3 36#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
33#define ATH9K_ANI_FIRSTEP_LVL 2 37#define ATH9K_ANI_FIRSTEP_LVL 2
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ff415e863ee9..3b3e91057a4c 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -26,10 +26,6 @@ static const int firstep_table[] =
26/* level: 0 1 2 3 4 5 6 7 8 */ 26/* level: 0 1 2 3 4 5 6 7 8 */
27 { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ 27 { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
28 28
29static const int cycpwrThr1_table[] =
30/* level: 0 1 2 3 4 5 6 7 8 */
31 { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
32
33/* 29/*
34 * register values to turn OFDM weak signal detection OFF 30 * register values to turn OFDM weak signal detection OFF
35 */ 31 */
@@ -921,7 +917,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
921 struct ath_common *common = ath9k_hw_common(ah); 917 struct ath_common *common = ath9k_hw_common(ah);
922 struct ath9k_channel *chan = ah->curchan; 918 struct ath9k_channel *chan = ah->curchan;
923 struct ar5416AniState *aniState = &ah->ani; 919 struct ar5416AniState *aniState = &ah->ani;
924 s32 value, value2; 920 s32 value;
925 921
926 switch (cmd & ah->ani_function) { 922 switch (cmd & ah->ani_function) {
927 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ 923 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
@@ -1008,42 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1008 case ATH9K_ANI_FIRSTEP_LEVEL:{ 1004 case ATH9K_ANI_FIRSTEP_LEVEL:{
1009 u32 level = param; 1005 u32 level = param;
1010 1006
1011 if (level >= ARRAY_SIZE(firstep_table)) { 1007 value = level * 2;
1012 ath_dbg(common, ANI,
1013 "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
1014 level, ARRAY_SIZE(firstep_table));
1015 return false;
1016 }
1017
1018 /*
1019 * make register setting relative to default
1020 * from INI file & cap value
1021 */
1022 value = firstep_table[level] -
1023 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
1024 aniState->iniDef.firstep;
1025 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1026 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
1027 if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
1028 value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
1029 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 1008 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1030 AR_PHY_FIND_SIG_FIRSTEP, 1009 AR_PHY_FIND_SIG_FIRSTEP, value);
1031 value);
1032 /*
1033 * we need to set first step low register too
1034 * make register setting relative to default
1035 * from INI file & cap value
1036 */
1037 value2 = firstep_table[level] -
1038 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
1039 aniState->iniDef.firstepLow;
1040 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1041 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
1042 if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
1043 value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
1044
1045 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW, 1010 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
1046 AR_PHY_FIND_SIG_FIRSTEP_LOW, value2); 1011 AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
1047 1012
1048 if (level != aniState->firstepLevel) { 1013 if (level != aniState->firstepLevel) {
1049 ath_dbg(common, ANI, 1014 ath_dbg(common, ANI,
@@ -1060,7 +1025,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1060 aniState->firstepLevel, 1025 aniState->firstepLevel,
1061 level, 1026 level,
1062 ATH9K_ANI_FIRSTEP_LVL, 1027 ATH9K_ANI_FIRSTEP_LVL,
1063 value2, 1028 value,
1064 aniState->iniDef.firstepLow); 1029 aniState->iniDef.firstepLow);
1065 if (level > aniState->firstepLevel) 1030 if (level > aniState->firstepLevel)
1066 ah->stats.ast_ani_stepup++; 1031 ah->stats.ast_ani_stepup++;
@@ -1073,41 +1038,13 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1073 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ 1038 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
1074 u32 level = param; 1039 u32 level = param;
1075 1040
1076 if (level >= ARRAY_SIZE(cycpwrThr1_table)) { 1041 value = (level + 1) * 2;
1077 ath_dbg(common, ANI,
1078 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
1079 level, ARRAY_SIZE(cycpwrThr1_table));
1080 return false;
1081 }
1082 /*
1083 * make register setting relative to default
1084 * from INI file & cap value
1085 */
1086 value = cycpwrThr1_table[level] -
1087 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
1088 aniState->iniDef.cycpwrThr1;
1089 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1090 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
1091 if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
1092 value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
1093 REG_RMW_FIELD(ah, AR_PHY_TIMING5, 1042 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
1094 AR_PHY_TIMING5_CYCPWR_THR1, 1043 AR_PHY_TIMING5_CYCPWR_THR1, value);
1095 value);
1096 1044
1097 /* 1045 if (IS_CHAN_HT40(ah->curchan))
1098 * set AR_PHY_EXT_CCA for extension channel 1046 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
1099 * make register setting relative to default 1047 AR_PHY_EXT_TIMING5_CYCPWR_THR1, value);
1100 * from INI file & cap value
1101 */
1102 value2 = cycpwrThr1_table[level] -
1103 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
1104 aniState->iniDef.cycpwrThr1Ext;
1105 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1106 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
1107 if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
1108 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
1109 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
1110 AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
1111 1048
1112 if (level != aniState->spurImmunityLevel) { 1049 if (level != aniState->spurImmunityLevel) {
1113 ath_dbg(common, ANI, 1050 ath_dbg(common, ANI,
@@ -1124,7 +1061,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1124 aniState->spurImmunityLevel, 1061 aniState->spurImmunityLevel,
1125 level, 1062 level,
1126 ATH9K_ANI_SPUR_IMMUNE_LVL, 1063 ATH9K_ANI_SPUR_IMMUNE_LVL,
1127 value2, 1064 value,
1128 aniState->iniDef.cycpwrThr1Ext); 1065 aniState->iniDef.cycpwrThr1Ext);
1129 if (level > aniState->spurImmunityLevel) 1066 if (level > aniState->spurImmunityLevel)
1130 ah->stats.ast_ani_spurup++; 1067 ah->stats.ast_ani_spurup++;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a352128c40ad..ac8301ef5242 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -23,10 +23,11 @@
23#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT 23#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
24#define MAX_MAG_DELTA 11 24#define MAX_MAG_DELTA 11
25#define MAX_PHS_DELTA 10 25#define MAX_PHS_DELTA 10
26#define MAXIQCAL 3
26 27
27struct coeff { 28struct coeff {
28 int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT]; 29 int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
29 int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT]; 30 int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
30 int iqc_coeff[2]; 31 int iqc_coeff[2];
31}; 32};
32 33
@@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
655 if (i2_m_q2_a0_d1 > 0x800) 656 if (i2_m_q2_a0_d1 > 0x800)
656 i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1); 657 i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
657 658
658 if (i2_p_q2_a0_d1 > 0x1000)
659 i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
660
661 if (iq_corr_a0_d1 > 0x800) 659 if (iq_corr_a0_d1 > 0x800)
662 iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1); 660 iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
663 661
@@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
800 if (q_q_coff > 63) 798 if (q_q_coff > 63)
801 q_q_coff = 63; 799 q_q_coff = 63;
802 800
803 iqc_coeff[0] = (q_q_coff * 128) + q_i_coff; 801 iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
804 802
805 ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n", 803 ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
806 chain_idx, iqc_coeff[0]); 804 chain_idx, iqc_coeff[0]);
@@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
831 if (q_q_coff > 63) 829 if (q_q_coff > 63)
832 q_q_coff = 63; 830 q_q_coff = 63;
833 831
834 iqc_coeff[1] = (q_q_coff * 128) + q_i_coff; 832 iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
835 833
836 ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n", 834 ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
837 chain_idx, iqc_coeff[1]); 835 chain_idx, iqc_coeff[1]);
@@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
839 return true; 837 return true;
840} 838}
841 839
842static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement, 840static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
841 int nmeasurement,
843 int max_delta) 842 int max_delta)
844{ 843{
845 int mp_max = -64, max_idx = 0; 844 int mp_max = -64, max_idx = 0;
@@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
848 847
849 /* find min/max mismatch across all calibrated gains */ 848 /* find min/max mismatch across all calibrated gains */
850 for (i = 0; i < nmeasurement; i++) { 849 for (i = 0; i < nmeasurement; i++) {
851 if (mp_coeff[i] > mp_max) { 850 if (mp_coeff[i][0] > mp_max) {
852 mp_max = mp_coeff[i]; 851 mp_max = mp_coeff[i][0];
853 max_idx = i; 852 max_idx = i;
854 } else if (mp_coeff[i] < mp_min) { 853 } else if (mp_coeff[i][0] < mp_min) {
855 mp_min = mp_coeff[i]; 854 mp_min = mp_coeff[i][0];
856 min_idx = i; 855 min_idx = i;
857 } 856 }
858 } 857 }
859 858
860 /* find average (exclude max abs value) */ 859 /* find average (exclude max abs value) */
861 for (i = 0; i < nmeasurement; i++) { 860 for (i = 0; i < nmeasurement; i++) {
862 if ((abs(mp_coeff[i]) < abs(mp_max)) || 861 if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
863 (abs(mp_coeff[i]) < abs(mp_min))) { 862 (abs(mp_coeff[i][0]) < abs(mp_min))) {
864 mp_avg += mp_coeff[i]; 863 mp_avg += mp_coeff[i][0];
865 mp_count++; 864 mp_count++;
866 } 865 }
867 } 866 }
@@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
873 if (mp_count) 872 if (mp_count)
874 mp_avg /= mp_count; 873 mp_avg /= mp_count;
875 else 874 else
876 mp_avg = mp_coeff[nmeasurement - 1]; 875 mp_avg = mp_coeff[nmeasurement - 1][0];
877 876
878 /* detect outlier */ 877 /* detect outlier */
879 if (abs(mp_max - mp_min) > max_delta) { 878 if (abs(mp_max - mp_min) > max_delta) {
@@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
882 else 881 else
883 outlier_idx = min_idx; 882 outlier_idx = min_idx;
884 883
885 mp_coeff[outlier_idx] = mp_avg; 884 mp_coeff[outlier_idx][0] = mp_avg;
886 } 885 }
887} 886}
888 887
889static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah, 888static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
890 struct coeff *coeff, 889 struct coeff *coeff,
891 bool is_reusable) 890 bool is_reusable)
892{ 891{
893 int i, im, nmeasurement; 892 int i, im, nmeasurement;
893 int magnitude, phase;
894 u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS]; 894 u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
895 struct ath9k_hw_cal_data *caldata = ah->caldata; 895 struct ath9k_hw_cal_data *caldata = ah->caldata;
896 896
@@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
920 if (nmeasurement > MAX_MEASUREMENT) 920 if (nmeasurement > MAX_MEASUREMENT)
921 nmeasurement = MAX_MEASUREMENT; 921 nmeasurement = MAX_MEASUREMENT;
922 922
923 /* detect outlier only if nmeasurement > 1 */ 923 /*
924 if (nmeasurement > 1) { 924 * Skip normal outlier detection for AR9550.
925 /* Detect magnitude outlier */ 925 */
926 ar9003_hw_detect_outlier(coeff->mag_coeff[i], 926 if (!AR_SREV_9550(ah)) {
927 nmeasurement, MAX_MAG_DELTA); 927 /* detect outlier only if nmeasurement > 1 */
928 928 if (nmeasurement > 1) {
929 /* Detect phase outlier */ 929 /* Detect magnitude outlier */
930 ar9003_hw_detect_outlier(coeff->phs_coeff[i], 930 ar9003_hw_detect_outlier(coeff->mag_coeff[i],
931 nmeasurement, MAX_PHS_DELTA); 931 nmeasurement,
932 MAX_MAG_DELTA);
933
934 /* Detect phase outlier */
935 ar9003_hw_detect_outlier(coeff->phs_coeff[i],
936 nmeasurement,
937 MAX_PHS_DELTA);
938 }
932 } 939 }
933 940
934 for (im = 0; im < nmeasurement; im++) { 941 for (im = 0; im < nmeasurement; im++) {
942 magnitude = coeff->mag_coeff[i][im][0];
943 phase = coeff->phs_coeff[i][im][0];
935 944
936 coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) | 945 coeff->iqc_coeff[0] =
937 ((coeff->phs_coeff[i][im] & 0x7f) << 7); 946 (phase & 0x7f) | ((magnitude & 0x7f) << 7);
938 947
939 if ((im % 2) == 0) 948 if ((im % 2) == 0)
940 REG_RMW_FIELD(ah, tx_corr_coeff[im][i], 949 REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
991 return true; 1000 return true;
992} 1001}
993 1002
994static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable) 1003static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
1004 struct coeff *coeff,
1005 int i, int nmeasurement)
1006{
1007 struct ath_common *common = ath9k_hw_common(ah);
1008 int im, ix, iy, temp;
1009
1010 for (im = 0; im < nmeasurement; im++) {
1011 for (ix = 0; ix < MAXIQCAL - 1; ix++) {
1012 for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
1013 if (coeff->mag_coeff[i][im][iy] <
1014 coeff->mag_coeff[i][im][ix]) {
1015 temp = coeff->mag_coeff[i][im][ix];
1016 coeff->mag_coeff[i][im][ix] =
1017 coeff->mag_coeff[i][im][iy];
1018 coeff->mag_coeff[i][im][iy] = temp;
1019 }
1020 if (coeff->phs_coeff[i][im][iy] <
1021 coeff->phs_coeff[i][im][ix]) {
1022 temp = coeff->phs_coeff[i][im][ix];
1023 coeff->phs_coeff[i][im][ix] =
1024 coeff->phs_coeff[i][im][iy];
1025 coeff->phs_coeff[i][im][iy] = temp;
1026 }
1027 }
1028 }
1029 coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
1030 coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
1031
1032 ath_dbg(common, CALIBRATE,
1033 "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
1034 i, im,
1035 coeff->mag_coeff[i][im][0],
1036 coeff->phs_coeff[i][im][0]);
1037 }
1038}
1039
1040static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
1041 struct coeff *coeff,
1042 int iqcal_idx,
1043 int nmeasurement)
1044{
1045 int i;
1046
1047 if ((iqcal_idx + 1) != MAXIQCAL)
1048 return false;
1049
1050 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1051 __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
1052 }
1053
1054 return true;
1055}
1056
1057static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
1058 int iqcal_idx,
1059 bool is_reusable)
995{ 1060{
996 struct ath_common *common = ath9k_hw_common(ah); 1061 struct ath_common *common = ath9k_hw_common(ah);
997 const u32 txiqcal_status[AR9300_MAX_CHAINS] = { 1062 const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
1004 AR_PHY_CHAN_INFO_TAB_1, 1069 AR_PHY_CHAN_INFO_TAB_1,
1005 AR_PHY_CHAN_INFO_TAB_2, 1070 AR_PHY_CHAN_INFO_TAB_2,
1006 }; 1071 };
1007 struct coeff coeff; 1072 static struct coeff coeff;
1008 s32 iq_res[6]; 1073 s32 iq_res[6];
1009 int i, im, j; 1074 int i, im, j;
1010 int nmeasurement; 1075 int nmeasurement = 0;
1076 bool outlier_detect = true;
1011 1077
1012 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 1078 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1013 if (!(ah->txchainmask & (1 << i))) 1079 if (!(ah->txchainmask & (1 << i)))
@@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
1065 goto tx_iqcal_fail; 1131 goto tx_iqcal_fail;
1066 } 1132 }
1067 1133
1068 coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f; 1134 coeff.phs_coeff[i][im][iqcal_idx] =
1069 coeff.phs_coeff[i][im] = 1135 coeff.iqc_coeff[0] & 0x7f;
1136 coeff.mag_coeff[i][im][iqcal_idx] =
1070 (coeff.iqc_coeff[0] >> 7) & 0x7f; 1137 (coeff.iqc_coeff[0] >> 7) & 0x7f;
1071 1138
1072 if (coeff.mag_coeff[i][im] > 63) 1139 if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
1073 coeff.mag_coeff[i][im] -= 128; 1140 coeff.mag_coeff[i][im][iqcal_idx] -= 128;
1074 if (coeff.phs_coeff[i][im] > 63) 1141 if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
1075 coeff.phs_coeff[i][im] -= 128; 1142 coeff.phs_coeff[i][im][iqcal_idx] -= 128;
1076 } 1143 }
1077 } 1144 }
1078 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable); 1145
1146 if (AR_SREV_9550(ah))
1147 outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
1148 iqcal_idx, nmeasurement);
1149 if (outlier_detect)
1150 ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
1079 1151
1080 return; 1152 return;
1081 1153
@@ -1409,7 +1481,7 @@ skip_tx_iqcal:
1409 } 1481 }
1410 1482
1411 if (txiqcal_done) 1483 if (txiqcal_done)
1412 ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable); 1484 ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
1413 else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags)) 1485 else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
1414 ar9003_hw_tx_iq_cal_reload(ah); 1486 ar9003_hw_tx_iq_cal_reload(ah);
1415 1487
@@ -1455,14 +1527,38 @@ skip_tx_iqcal:
1455 return true; 1527 return true;
1456} 1528}
1457 1529
1530static bool do_ar9003_agc_cal(struct ath_hw *ah)
1531{
1532 struct ath_common *common = ath9k_hw_common(ah);
1533 bool status;
1534
1535 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
1536 REG_READ(ah, AR_PHY_AGC_CONTROL) |
1537 AR_PHY_AGC_CONTROL_CAL);
1538
1539 status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
1540 AR_PHY_AGC_CONTROL_CAL,
1541 0, AH_WAIT_TIMEOUT);
1542 if (!status) {
1543 ath_dbg(common, CALIBRATE,
1544 "offset calibration failed to complete in %d ms,"
1545 "noisy environment?\n",
1546 AH_WAIT_TIMEOUT / 1000);
1547 return false;
1548 }
1549
1550 return true;
1551}
1552
1458static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, 1553static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
1459 struct ath9k_channel *chan) 1554 struct ath9k_channel *chan)
1460{ 1555{
1461 struct ath_common *common = ath9k_hw_common(ah); 1556 struct ath_common *common = ath9k_hw_common(ah);
1462 struct ath9k_hw_cal_data *caldata = ah->caldata; 1557 struct ath9k_hw_cal_data *caldata = ah->caldata;
1463 bool txiqcal_done = false; 1558 bool txiqcal_done = false;
1464 bool is_reusable = true, status = true; 1559 bool status = true;
1465 bool run_agc_cal = false, sep_iq_cal = false; 1560 bool run_agc_cal = false, sep_iq_cal = false;
1561 int i = 0;
1466 1562
1467 /* Use chip chainmask only for calibration */ 1563 /* Use chip chainmask only for calibration */
1468 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); 1564 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
1485 * AGC calibration. Specifically, AR9550 in SoC chips. 1581 * AGC calibration. Specifically, AR9550 in SoC chips.
1486 */ 1582 */
1487 if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) { 1583 if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
1488 txiqcal_done = true; 1584 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
1585 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
1586 txiqcal_done = true;
1587 } else {
1588 txiqcal_done = false;
1589 }
1489 run_agc_cal = true; 1590 run_agc_cal = true;
1490 } else { 1591 } else {
1491 sep_iq_cal = true; 1592 sep_iq_cal = true;
@@ -1512,27 +1613,37 @@ skip_tx_iqcal:
1512 if (AR_SREV_9330_11(ah)) 1613 if (AR_SREV_9330_11(ah))
1513 ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan)); 1614 ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
1514 1615
1515 /* Calibrate the AGC */ 1616 /*
1516 REG_WRITE(ah, AR_PHY_AGC_CONTROL, 1617 * For non-AR9550 chips, we just trigger AGC calibration
1517 REG_READ(ah, AR_PHY_AGC_CONTROL) | 1618 * in the HW, poll for completion and then process
1518 AR_PHY_AGC_CONTROL_CAL); 1619 * the results.
1519 1620 *
1520 /* Poll for offset calibration complete */ 1621 * For AR955x, we run it multiple times and use
1521 status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 1622 * median IQ correction.
1522 AR_PHY_AGC_CONTROL_CAL, 1623 */
1523 0, AH_WAIT_TIMEOUT); 1624 if (!AR_SREV_9550(ah)) {
1524 } 1625 status = do_ar9003_agc_cal(ah);
1626 if (!status)
1627 return false;
1525 1628
1526 if (!status) { 1629 if (txiqcal_done)
1527 ath_dbg(common, CALIBRATE, 1630 ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
1528 "offset calibration failed to complete in %d ms; noisy environment?\n", 1631 } else {
1529 AH_WAIT_TIMEOUT / 1000); 1632 if (!txiqcal_done) {
1530 return false; 1633 status = do_ar9003_agc_cal(ah);
1634 if (!status)
1635 return false;
1636 } else {
1637 for (i = 0; i < MAXIQCAL; i++) {
1638 status = do_ar9003_agc_cal(ah);
1639 if (!status)
1640 return false;
1641 ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
1642 }
1643 }
1644 }
1531 } 1645 }
1532 1646
1533 if (txiqcal_done)
1534 ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
1535
1536 /* Revert chainmask to runtime parameters */ 1647 /* Revert chainmask to runtime parameters */
1537 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 1648 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
1538 1649
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b8daff78b9d1..235053ba7737 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -23,8 +23,8 @@
23#define COMP_HDR_LEN 4 23#define COMP_HDR_LEN 4
24#define COMP_CKSUM_LEN 2 24#define COMP_CKSUM_LEN 2
25 25
26#define LE16(x) __constant_cpu_to_le16(x) 26#define LE16(x) cpu_to_le16(x)
27#define LE32(x) __constant_cpu_to_le32(x) 27#define LE32(x) cpu_to_le32(x)
28 28
29/* Local defines to distinguish between extension and control CTL's */ 29/* Local defines to distinguish between extension and control CTL's */
30#define EXT_ADDITIVE (0x8000) 30#define EXT_ADDITIVE (0x8000)
@@ -4792,43 +4792,54 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
4792 4792
4793tempslope: 4793tempslope:
4794 if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) { 4794 if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
4795 u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
4796
4795 /* 4797 /*
4796 * AR955x has tempSlope register for each chain. 4798 * AR955x has tempSlope register for each chain.
4797 * Check whether temp_compensation feature is enabled or not. 4799 * Check whether temp_compensation feature is enabled or not.
4798 */ 4800 */
4799 if (eep->baseEepHeader.featureEnable & 0x1) { 4801 if (eep->baseEepHeader.featureEnable & 0x1) {
4800 if (frequency < 4000) { 4802 if (frequency < 4000) {
4801 REG_RMW_FIELD(ah, AR_PHY_TPC_19, 4803 if (txmask & BIT(0))
4802 AR_PHY_TPC_19_ALPHA_THERM, 4804 REG_RMW_FIELD(ah, AR_PHY_TPC_19,
4803 eep->base_ext2.tempSlopeLow); 4805 AR_PHY_TPC_19_ALPHA_THERM,
4804 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, 4806 eep->base_ext2.tempSlopeLow);
4805 AR_PHY_TPC_19_ALPHA_THERM, 4807 if (txmask & BIT(1))
4806 temp_slope); 4808 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
4807 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2, 4809 AR_PHY_TPC_19_ALPHA_THERM,
4808 AR_PHY_TPC_19_ALPHA_THERM, 4810 temp_slope);
4809 eep->base_ext2.tempSlopeHigh); 4811 if (txmask & BIT(2))
4812 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
4813 AR_PHY_TPC_19_ALPHA_THERM,
4814 eep->base_ext2.tempSlopeHigh);
4810 } else { 4815 } else {
4811 REG_RMW_FIELD(ah, AR_PHY_TPC_19, 4816 if (txmask & BIT(0))
4812 AR_PHY_TPC_19_ALPHA_THERM, 4817 REG_RMW_FIELD(ah, AR_PHY_TPC_19,
4813 temp_slope); 4818 AR_PHY_TPC_19_ALPHA_THERM,
4814 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, 4819 temp_slope);
4815 AR_PHY_TPC_19_ALPHA_THERM, 4820 if (txmask & BIT(1))
4816 temp_slope1); 4821 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
4817 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2, 4822 AR_PHY_TPC_19_ALPHA_THERM,
4818 AR_PHY_TPC_19_ALPHA_THERM, 4823 temp_slope1);
4819 temp_slope2); 4824 if (txmask & BIT(2))
4825 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
4826 AR_PHY_TPC_19_ALPHA_THERM,
4827 temp_slope2);
4820 } 4828 }
4821 } else { 4829 } else {
4822 /* 4830 /*
4823 * If temp compensation is not enabled, 4831 * If temp compensation is not enabled,
4824 * set all registers to 0. 4832 * set all registers to 0.
4825 */ 4833 */
4826 REG_RMW_FIELD(ah, AR_PHY_TPC_19, 4834 if (txmask & BIT(0))
4827 AR_PHY_TPC_19_ALPHA_THERM, 0); 4835 REG_RMW_FIELD(ah, AR_PHY_TPC_19,
4828 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, 4836 AR_PHY_TPC_19_ALPHA_THERM, 0);
4829 AR_PHY_TPC_19_ALPHA_THERM, 0); 4837 if (txmask & BIT(1))
4830 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2, 4838 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
4831 AR_PHY_TPC_19_ALPHA_THERM, 0); 4839 AR_PHY_TPC_19_ALPHA_THERM, 0);
4840 if (txmask & BIT(2))
4841 REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
4842 AR_PHY_TPC_19_ALPHA_THERM, 0);
4832 } 4843 }
4833 } else { 4844 } else {
4834 REG_RMW_FIELD(ah, AR_PHY_TPC_19, 4845 REG_RMW_FIELD(ah, AR_PHY_TPC_19,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 09facba1dc6d..8927fc34d84c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -868,10 +868,6 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
868 868
869 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 869 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
870 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); 870 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
871 if (IS_CHAN_QUARTER_RATE(chan))
872 rfMode |= AR_PHY_MODE_QUARTER;
873 if (IS_CHAN_HALF_RATE(chan))
874 rfMode |= AR_PHY_MODE_HALF;
875 871
876 if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF)) 872 if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
877 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, 873 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b5ac32cfbeb8..44d74495c4de 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -30,7 +30,6 @@
30#include "spectral.h" 30#include "spectral.h"
31 31
32struct ath_node; 32struct ath_node;
33struct ath_rate_table;
34 33
35extern struct ieee80211_ops ath9k_ops; 34extern struct ieee80211_ops ath9k_ops;
36extern int ath9k_modparam_nohwcrypt; 35extern int ath9k_modparam_nohwcrypt;
@@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
150#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e)) 149#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
151#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf)) 150#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
152 151
152enum {
153 WLAN_RC_PHY_OFDM,
154 WLAN_RC_PHY_CCK,
155};
156
153struct ath_txq { 157struct ath_txq {
154 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */ 158 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
155 u32 axq_qnum; /* ath9k hardware queue number */ 159 u32 axq_qnum; /* ath9k hardware queue number */
@@ -399,21 +403,10 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
399#define ATH_BCBUF 8 403#define ATH_BCBUF 8
400#define ATH_DEFAULT_BINTVAL 100 /* TU */ 404#define ATH_DEFAULT_BINTVAL 100 /* TU */
401#define ATH_DEFAULT_BMISS_LIMIT 10 405#define ATH_DEFAULT_BMISS_LIMIT 10
402#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
403 406
404#define TSF_TO_TU(_h,_l) \ 407#define TSF_TO_TU(_h,_l) \
405 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 408 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
406 409
407struct ath_beacon_config {
408 int beacon_interval;
409 u16 listen_interval;
410 u16 dtim_period;
411 u16 bmiss_timeout;
412 u8 dtim_count;
413 bool enable_beacon;
414 bool ibss_creator;
415};
416
417struct ath_beacon { 410struct ath_beacon {
418 enum { 411 enum {
419 OK, /* no change needed */ 412 OK, /* no change needed */
@@ -423,11 +416,9 @@ struct ath_beacon {
423 416
424 u32 beaconq; 417 u32 beaconq;
425 u32 bmisscnt; 418 u32 bmisscnt;
426 u32 bc_tstamp;
427 struct ieee80211_vif *bslot[ATH_BCBUF]; 419 struct ieee80211_vif *bslot[ATH_BCBUF];
428 int slottime; 420 int slottime;
429 int slotupdate; 421 int slotupdate;
430 struct ath9k_tx_queue_info beacon_qi;
431 struct ath_descdma bdma; 422 struct ath_descdma bdma;
432 struct ath_txq *cabq; 423 struct ath_txq *cabq;
433 struct list_head bbuf; 424 struct list_head bbuf;
@@ -442,7 +433,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
442void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif); 433void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
443void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif); 434void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
444void ath9k_set_beacon(struct ath_softc *sc); 435void ath9k_set_beacon(struct ath_softc *sc);
445bool ath9k_csa_is_finished(struct ath_softc *sc); 436bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
437void ath9k_csa_update(struct ath_softc *sc);
446 438
447/*******************/ 439/*******************/
448/* Link Monitoring */ 440/* Link Monitoring */
@@ -693,15 +685,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
693#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 685#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
694#define MAX_GTT_CNT 5 686#define MAX_GTT_CNT 5
695 687
696enum sc_op_flags {
697 SC_OP_INVALID,
698 SC_OP_BEACONS,
699 SC_OP_ANI_RUN,
700 SC_OP_PRIM_STA_VIF,
701 SC_OP_HW_RESET,
702 SC_OP_SCANNING,
703};
704
705/* Powersave flags */ 688/* Powersave flags */
706#define PS_WAIT_FOR_BEACON BIT(0) 689#define PS_WAIT_FOR_BEACON BIT(0)
707#define PS_WAIT_FOR_CAB BIT(1) 690#define PS_WAIT_FOR_CAB BIT(1)
@@ -731,7 +714,6 @@ struct ath_softc {
731 struct completion paprd_complete; 714 struct completion paprd_complete;
732 wait_queue_head_t tx_wait; 715 wait_queue_head_t tx_wait;
733 716
734 unsigned long sc_flags;
735 unsigned long driver_data; 717 unsigned long driver_data;
736 718
737 u8 gtt_cnt; 719 u8 gtt_cnt;
@@ -748,7 +730,6 @@ struct ath_softc {
748 struct ath_rx rx; 730 struct ath_rx rx;
749 struct ath_tx tx; 731 struct ath_tx tx;
750 struct ath_beacon beacon; 732 struct ath_beacon beacon;
751 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
752 733
753#ifdef CONFIG_MAC80211_LEDS 734#ifdef CONFIG_MAC80211_LEDS
754 bool led_registered; 735 bool led_registered;
@@ -757,7 +738,6 @@ struct ath_softc {
757#endif 738#endif
758 739
759 struct ath9k_hw_cal_data caldata; 740 struct ath9k_hw_cal_data caldata;
760 int last_rssi;
761 741
762#ifdef CONFIG_ATH9K_DEBUGFS 742#ifdef CONFIG_ATH9K_DEBUGFS
763 struct ath9k_debug debug; 743 struct ath9k_debug debug;
@@ -774,7 +754,6 @@ struct ath_softc {
774#endif 754#endif
775 755
776 struct ath_descdma txsdma; 756 struct ath_descdma txsdma;
777 struct ieee80211_vif *csa_vif;
778 757
779 struct ath_ant_comb ant_comb; 758 struct ath_ant_comb ant_comb;
780 u8 ant_tx, ant_rx; 759 u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2e8bba0eb361..471e0f624e81 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -80,7 +80,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
80 u8 chainmask = ah->txchainmask; 80 u8 chainmask = ah->txchainmask;
81 u8 rate = 0; 81 u8 rate = 0;
82 82
83 sband = &sc->sbands[common->hw->conf.chandef.chan->band]; 83 sband = &common->sbands[common->hw->conf.chandef.chan->band];
84 rate = sband->bitrates[rateidx].hw_value; 84 rate = sband->bitrates[rateidx].hw_value;
85 if (vif->bss_conf.use_short_preamble) 85 if (vif->bss_conf.use_short_preamble)
86 rate |= sband->bitrates[rateidx].hw_value_short; 86 rate |= sband->bitrates[rateidx].hw_value_short;
@@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
292 (unsigned long long)tsfadjust, avp->av_bslot); 292 (unsigned long long)tsfadjust, avp->av_bslot);
293} 293}
294 294
295bool ath9k_csa_is_finished(struct ath_softc *sc) 295bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
296{ 296{
297 struct ieee80211_vif *vif;
298
299 vif = sc->csa_vif;
300 if (!vif || !vif->csa_active) 297 if (!vif || !vif->csa_active)
301 return false; 298 return false;
302 299
@@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
304 return false; 301 return false;
305 302
306 ieee80211_csa_finish(vif); 303 ieee80211_csa_finish(vif);
307
308 sc->csa_vif = NULL;
309 return true; 304 return true;
310} 305}
311 306
307static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
308{
309 struct ath_softc *sc = data;
310 ath9k_csa_is_finished(sc, vif);
311}
312
313void ath9k_csa_update(struct ath_softc *sc)
314{
315 ieee80211_iterate_active_interfaces(sc->hw,
316 IEEE80211_IFACE_ITER_NORMAL,
317 ath9k_csa_update_vif,
318 sc);
319}
320
312void ath9k_beacon_tasklet(unsigned long data) 321void ath9k_beacon_tasklet(unsigned long data)
313{ 322{
314 struct ath_softc *sc = (struct ath_softc *)data; 323 struct ath_softc *sc = (struct ath_softc *)data;
@@ -319,7 +328,7 @@ void ath9k_beacon_tasklet(unsigned long data)
319 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 328 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
320 int slot; 329 int slot;
321 330
322 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) { 331 if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
323 ath_dbg(common, RESET, 332 ath_dbg(common, RESET,
324 "reset work is pending, skip beaconing now\n"); 333 "reset work is pending, skip beaconing now\n");
325 return; 334 return;
@@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
362 return; 371 return;
363 } 372 }
364 373
365 /* EDMA devices check that in the tx completion function. */
366 if (!edma && ath9k_csa_is_finished(sc))
367 return;
368
369 slot = ath9k_beacon_choose_slot(sc); 374 slot = ath9k_beacon_choose_slot(sc);
370 vif = sc->beacon.bslot[slot]; 375 vif = sc->beacon.bslot[slot];
371 376
377 /* EDMA devices check that in the tx completion function. */
378 if (!edma && ath9k_csa_is_finished(sc, vif))
379 return;
380
372 if (!vif || !vif->bss_conf.enable_beacon) 381 if (!vif || !vif->bss_conf.enable_beacon)
373 return; 382 return;
374 383
@@ -438,33 +447,6 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
438 ath9k_hw_enable_interrupts(ah); 447 ath9k_hw_enable_interrupts(ah);
439} 448}
440 449
441/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
442static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
443{
444 u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
445
446 tsf_mod = tsf & (BIT(10) - 1);
447 tsf_hi = tsf >> 32;
448 tsf_lo = ((u32) tsf) >> 10;
449
450 mod_hi = tsf_hi % div_tu;
451 mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
452
453 return (mod_lo << 10) | tsf_mod;
454}
455
456static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
457 unsigned int interval)
458{
459 struct ath_hw *ah = sc->sc_ah;
460 unsigned int offset;
461
462 tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
463 offset = ath9k_mod_tsf64_tu(tsf, interval);
464
465 return (u32) tsf + TU_TO_USEC(interval) - offset;
466}
467
468/* 450/*
469 * For multi-bss ap support beacons are either staggered evenly over N slots or 451 * For multi-bss ap support beacons are either staggered evenly over N slots or
470 * burst together. For the former arrange for the SWBA to be delivered for each 452 * burst together. For the former arrange for the SWBA to be delivered for each
@@ -474,115 +456,18 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
474 struct ath_beacon_config *conf) 456 struct ath_beacon_config *conf)
475{ 457{
476 struct ath_hw *ah = sc->sc_ah; 458 struct ath_hw *ah = sc->sc_ah;
477 struct ath_common *common = ath9k_hw_common(ah);
478 u32 nexttbtt, intval;
479
480 /* NB: the beacon interval is kept internally in TU's */
481 intval = TU_TO_USEC(conf->beacon_interval);
482 intval /= ATH_BCBUF;
483 nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
484 conf->beacon_interval);
485
486 if (conf->enable_beacon)
487 ah->imask |= ATH9K_INT_SWBA;
488 else
489 ah->imask &= ~ATH9K_INT_SWBA;
490
491 ath_dbg(common, BEACON,
492 "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
493 (conf->enable_beacon) ? "Enable" : "Disable",
494 nexttbtt, intval, conf->beacon_interval);
495 459
496 ath9k_beacon_init(sc, nexttbtt, intval, false); 460 ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
461 ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
497} 462}
498 463
499/* 464static void ath9k_beacon_config_sta(struct ath_hw *ah,
500 * This sets up the beacon timers according to the timestamp of the last
501 * received beacon and the current TSF, configures PCF and DTIM
502 * handling, programs the sleep registers so the hardware will wakeup in
503 * time to receive beacons, and configures the beacon miss handling so
504 * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
505 * we've associated with.
506 */
507static void ath9k_beacon_config_sta(struct ath_softc *sc,
508 struct ath_beacon_config *conf) 465 struct ath_beacon_config *conf)
509{ 466{
510 struct ath_hw *ah = sc->sc_ah;
511 struct ath_common *common = ath9k_hw_common(ah);
512 struct ath9k_beacon_state bs; 467 struct ath9k_beacon_state bs;
513 int dtim_intval, sleepduration;
514 u32 nexttbtt = 0, intval;
515 u64 tsf;
516 468
517 /* No need to configure beacon if we are not associated */ 469 if (ath9k_cmn_beacon_config_sta(ah, conf, &bs) == -EPERM)
518 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
519 ath_dbg(common, BEACON,
520 "STA is not yet associated..skipping beacon config\n");
521 return; 470 return;
522 }
523
524 memset(&bs, 0, sizeof(bs));
525 intval = conf->beacon_interval;
526
527 /*
528 * Setup dtim parameters according to
529 * last beacon we received (which may be none).
530 */
531 dtim_intval = intval * conf->dtim_period;
532 sleepduration = conf->listen_interval * intval;
533
534 /*
535 * Pull nexttbtt forward to reflect the current
536 * TSF and calculate dtim state for the result.
537 */
538 tsf = ath9k_hw_gettsf64(ah);
539 nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
540
541 bs.bs_intval = TU_TO_USEC(intval);
542 bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
543 bs.bs_nexttbtt = nexttbtt;
544 bs.bs_nextdtim = nexttbtt;
545 if (conf->dtim_period > 1)
546 bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
547
548 /*
549 * Calculate the number of consecutive beacons to miss* before taking
550 * a BMISS interrupt. The configuration is specified in TU so we only
551 * need calculate based on the beacon interval. Note that we clamp the
552 * result to at most 15 beacons.
553 */
554 if (sleepduration > intval) {
555 bs.bs_bmissthreshold = conf->listen_interval *
556 ATH_DEFAULT_BMISS_LIMIT / 2;
557 } else {
558 bs.bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, intval);
559 if (bs.bs_bmissthreshold > 15)
560 bs.bs_bmissthreshold = 15;
561 else if (bs.bs_bmissthreshold <= 0)
562 bs.bs_bmissthreshold = 1;
563 }
564
565 /*
566 * Calculate sleep duration. The configuration is given in ms.
567 * We ensure a multiple of the beacon period is used. Also, if the sleep
568 * duration is greater than the DTIM period then it makes senses
569 * to make it a multiple of that.
570 *
571 * XXX fixed at 100ms
572 */
573
574 bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
575 sleepduration));
576 if (bs.bs_sleepduration > bs.bs_dtimperiod)
577 bs.bs_sleepduration = bs.bs_dtimperiod;
578
579 /* TSF out of range threshold fixed at 1 second */
580 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
581
582 ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
583 bs.bs_bmissthreshold, bs.bs_sleepduration);
584
585 /* Set the computed STA beacon timers */
586 471
587 ath9k_hw_disable_interrupts(ah); 472 ath9k_hw_disable_interrupts(ah);
588 ath9k_hw_set_sta_beacon_timers(ah, &bs); 473 ath9k_hw_set_sta_beacon_timers(ah, &bs);
@@ -597,36 +482,19 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
597{ 482{
598 struct ath_hw *ah = sc->sc_ah; 483 struct ath_hw *ah = sc->sc_ah;
599 struct ath_common *common = ath9k_hw_common(ah); 484 struct ath_common *common = ath9k_hw_common(ah);
600 u32 intval, nexttbtt;
601 485
602 ath9k_reset_beacon_status(sc); 486 ath9k_reset_beacon_status(sc);
603 487
604 intval = TU_TO_USEC(conf->beacon_interval); 488 ath9k_cmn_beacon_config_adhoc(ah, conf);
605
606 if (conf->ibss_creator)
607 nexttbtt = intval;
608 else
609 nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
610 conf->beacon_interval);
611
612 if (conf->enable_beacon)
613 ah->imask |= ATH9K_INT_SWBA;
614 else
615 ah->imask &= ~ATH9K_INT_SWBA;
616
617 ath_dbg(common, BEACON,
618 "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
619 (conf->enable_beacon) ? "Enable" : "Disable",
620 nexttbtt, intval, conf->beacon_interval);
621 489
622 ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator); 490 ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
623 491
624 /* 492 /*
625 * Set the global 'beacon has been configured' flag for the 493 * Set the global 'beacon has been configured' flag for the
626 * joiner case in IBSS mode. 494 * joiner case in IBSS mode.
627 */ 495 */
628 if (!conf->ibss_creator && conf->enable_beacon) 496 if (!conf->ibss_creator && conf->enable_beacon)
629 set_bit(SC_OP_BEACONS, &sc->sc_flags); 497 set_bit(ATH_OP_BEACONS, &common->op_flags);
630} 498}
631 499
632static bool ath9k_allow_beacon_config(struct ath_softc *sc, 500static bool ath9k_allow_beacon_config(struct ath_softc *sc,
@@ -646,7 +514,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
646 514
647 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) { 515 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
648 if ((vif->type == NL80211_IFTYPE_STATION) && 516 if ((vif->type == NL80211_IFTYPE_STATION) &&
649 test_bit(SC_OP_BEACONS, &sc->sc_flags) && 517 test_bit(ATH_OP_BEACONS, &common->op_flags) &&
650 !avp->primary_sta_vif) { 518 !avp->primary_sta_vif) {
651 ath_dbg(common, CONFIG, 519 ath_dbg(common, CONFIG,
652 "Beacon already configured for a station interface\n"); 520 "Beacon already configured for a station interface\n");
@@ -668,7 +536,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
668 536
669 cur_conf->beacon_interval = bss_conf->beacon_int; 537 cur_conf->beacon_interval = bss_conf->beacon_int;
670 cur_conf->dtim_period = bss_conf->dtim_period; 538 cur_conf->dtim_period = bss_conf->dtim_period;
671 cur_conf->listen_interval = 1;
672 cur_conf->dtim_count = 1; 539 cur_conf->dtim_count = 1;
673 cur_conf->ibss_creator = bss_conf->ibss_creator; 540 cur_conf->ibss_creator = bss_conf->ibss_creator;
674 cur_conf->bmiss_timeout = 541 cur_conf->bmiss_timeout =
@@ -698,6 +565,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
698{ 565{
699 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 566 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
700 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 567 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
568 struct ath_hw *ah = sc->sc_ah;
569 struct ath_common *common = ath9k_hw_common(ah);
701 unsigned long flags; 570 unsigned long flags;
702 bool skip_beacon = false; 571 bool skip_beacon = false;
703 572
@@ -710,7 +579,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
710 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) { 579 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
711 ath9k_cache_beacon_config(sc, bss_conf); 580 ath9k_cache_beacon_config(sc, bss_conf);
712 ath9k_set_beacon(sc); 581 ath9k_set_beacon(sc);
713 set_bit(SC_OP_BEACONS, &sc->sc_flags); 582 set_bit(ATH_OP_BEACONS, &common->op_flags);
714 return; 583 return;
715 } 584 }
716 585
@@ -749,13 +618,13 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
749 } 618 }
750 619
751 /* 620 /*
752 * Do not set the SC_OP_BEACONS flag for IBSS joiner mode 621 * Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
753 * here, it is done in ath9k_beacon_config_adhoc(). 622 * here, it is done in ath9k_beacon_config_adhoc().
754 */ 623 */
755 if (cur_conf->enable_beacon && !skip_beacon) 624 if (cur_conf->enable_beacon && !skip_beacon)
756 set_bit(SC_OP_BEACONS, &sc->sc_flags); 625 set_bit(ATH_OP_BEACONS, &common->op_flags);
757 else 626 else
758 clear_bit(SC_OP_BEACONS, &sc->sc_flags); 627 clear_bit(ATH_OP_BEACONS, &common->op_flags);
759 } 628 }
760} 629}
761 630
@@ -773,7 +642,7 @@ void ath9k_set_beacon(struct ath_softc *sc)
773 ath9k_beacon_config_adhoc(sc, cur_conf); 642 ath9k_beacon_config_adhoc(sc, cur_conf);
774 break; 643 break;
775 case NL80211_IFTYPE_STATION: 644 case NL80211_IFTYPE_STATION:
776 ath9k_beacon_config_sta(sc, cur_conf); 645 ath9k_beacon_config_sta(sc->sc_ah, cur_conf);
777 break; 646 break;
778 default: 647 default:
779 ath_dbg(common, CONFIG, "Unsupported beaconing mode\n"); 648 ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
new file mode 100644
index 000000000000..775d1d20ce0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.c
@@ -0,0 +1,180 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "common.h"
18
19#define FUDGE 2
20
21/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
22static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
23{
24 u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
25
26 tsf_mod = tsf & (BIT(10) - 1);
27 tsf_hi = tsf >> 32;
28 tsf_lo = ((u32) tsf) >> 10;
29
30 mod_hi = tsf_hi % div_tu;
31 mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
32
33 return (mod_lo << 10) | tsf_mod;
34}
35
36static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf,
37 unsigned int interval)
38{
39 unsigned int offset;
40
41 tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
42 offset = ath9k_mod_tsf64_tu(tsf, interval);
43
44 return (u32) tsf + TU_TO_USEC(interval) - offset;
45}
46
47/*
48 * This sets up the beacon timers according to the timestamp of the last
49 * received beacon and the current TSF, configures PCF and DTIM
50 * handling, programs the sleep registers so the hardware will wakeup in
51 * time to receive beacons, and configures the beacon miss handling so
52 * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
53 * we've associated with.
54 */
55int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
56 struct ath_beacon_config *conf,
57 struct ath9k_beacon_state *bs)
58{
59 struct ath_common *common = ath9k_hw_common(ah);
60 int dtim_intval;
61 u64 tsf;
62
63 /* No need to configure beacon if we are not associated */
64 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
65 ath_dbg(common, BEACON,
66 "STA is not yet associated..skipping beacon config\n");
67 return -EPERM;
68 }
69
70 memset(bs, 0, sizeof(*bs));
71 conf->intval = conf->beacon_interval;
72
73 /*
74 * Setup dtim parameters according to
75 * last beacon we received (which may be none).
76 */
77 dtim_intval = conf->intval * conf->dtim_period;
78
79 /*
80 * Pull nexttbtt forward to reflect the current
81 * TSF and calculate dtim state for the result.
82 */
83 tsf = ath9k_hw_gettsf64(ah);
84 conf->nexttbtt = ath9k_get_next_tbtt(ah, tsf, conf->intval);
85
86 bs->bs_intval = TU_TO_USEC(conf->intval);
87 bs->bs_dtimperiod = conf->dtim_period * bs->bs_intval;
88 bs->bs_nexttbtt = conf->nexttbtt;
89 bs->bs_nextdtim = conf->nexttbtt;
90 if (conf->dtim_period > 1)
91 bs->bs_nextdtim = ath9k_get_next_tbtt(ah, tsf, dtim_intval);
92
93 /*
94 * Calculate the number of consecutive beacons to miss* before taking
95 * a BMISS interrupt. The configuration is specified in TU so we only
96 * need calculate based on the beacon interval. Note that we clamp the
97 * result to at most 15 beacons.
98 */
99 bs->bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, conf->intval);
100 if (bs->bs_bmissthreshold > 15)
101 bs->bs_bmissthreshold = 15;
102 else if (bs->bs_bmissthreshold <= 0)
103 bs->bs_bmissthreshold = 1;
104
105 /*
106 * Calculate sleep duration. The configuration is given in ms.
107 * We ensure a multiple of the beacon period is used. Also, if the sleep
108 * duration is greater than the DTIM period then it makes senses
109 * to make it a multiple of that.
110 *
111 * XXX fixed at 100ms
112 */
113
114 bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
115 conf->intval));
116 if (bs->bs_sleepduration > bs->bs_dtimperiod)
117 bs->bs_sleepduration = bs->bs_dtimperiod;
118
119 /* TSF out of range threshold fixed at 1 second */
120 bs->bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
121
122 ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
123 bs->bs_bmissthreshold, bs->bs_sleepduration);
124 return 0;
125}
126EXPORT_SYMBOL(ath9k_cmn_beacon_config_sta);
127
128void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
129 struct ath_beacon_config *conf)
130{
131 struct ath_common *common = ath9k_hw_common(ah);
132
133 conf->intval = TU_TO_USEC(conf->beacon_interval);
134
135 if (conf->ibss_creator)
136 conf->nexttbtt = conf->intval;
137 else
138 conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
139 conf->beacon_interval);
140
141 if (conf->enable_beacon)
142 ah->imask |= ATH9K_INT_SWBA;
143 else
144 ah->imask &= ~ATH9K_INT_SWBA;
145
146 ath_dbg(common, BEACON,
147 "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
148 (conf->enable_beacon) ? "Enable" : "Disable",
149 conf->nexttbtt, conf->intval, conf->beacon_interval);
150}
151EXPORT_SYMBOL(ath9k_cmn_beacon_config_adhoc);
152
153/*
154 * For multi-bss ap support beacons are either staggered evenly over N slots or
155 * burst together. For the former arrange for the SWBA to be delivered for each
156 * slot. Slots that are not occupied will generate nothing.
157 */
158void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
159 struct ath_beacon_config *conf,
160 unsigned int bc_buf)
161{
162 struct ath_common *common = ath9k_hw_common(ah);
163
164 /* NB: the beacon interval is kept internally in TU's */
165 conf->intval = TU_TO_USEC(conf->beacon_interval);
166 conf->intval /= bc_buf;
167 conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
168 conf->beacon_interval);
169
170 if (conf->enable_beacon)
171 ah->imask |= ATH9K_INT_SWBA;
172 else
173 ah->imask &= ~ATH9K_INT_SWBA;
174
175 ath_dbg(common, BEACON,
176 "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
177 (conf->enable_beacon) ? "Enable" : "Disable",
178 conf->nexttbtt, conf->intval, conf->beacon_interval);
179}
180EXPORT_SYMBOL(ath9k_cmn_beacon_config_ap);
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.h b/drivers/net/wireless/ath/ath9k/common-beacon.h
new file mode 100644
index 000000000000..3665d27f0dc7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (c) 2009-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
struct ath_beacon_config;

/*
 * Shared beacon timer calculation helpers (common-beacon.c).  Each
 * fills in conf->intval and conf->nexttbtt; the AP and IBSS variants
 * additionally update ah->imask (SWBA).  The STA variant returns the
 * computed timers through @bs and returns -EPERM when no primary
 * station interface is associated yet.
 */
int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
				struct ath_beacon_config *conf,
				struct ath9k_beacon_state *bs);
void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
				   struct ath_beacon_config *conf);
/* @bc_buf: number of beacon slots the interval is staggered over */
void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
				struct ath_beacon_config *conf,
				unsigned int bc_buf);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
new file mode 100644
index 000000000000..a006c1499728
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/* We use the hw_value as an index into our private channel structure */
18
19#include "common.h"
20
/*
 * A channel's hw_value is used as an index into the driver's private
 * channel structure, so the entries below must keep their order.
 */
#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable on 2312-2732
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

/*
 * The first four entries are CCK rates (2 GHz only); the 5 GHz band
 * uses this table starting at offset 4, i.e. the OFDM rates only
 * (see ath9k_cmn_init_channels_rates).  Bitrates are in 100 kbit/s.
 */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
			IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
			IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
			 IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
			 IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
			 IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
			 IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
			 IEEE80211_RATE_SUPPORTS_10MHZ)),
	RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
			 IEEE80211_RATE_SUPPORTS_10MHZ)),
};
124
125int ath9k_cmn_init_channels_rates(struct ath_common *common)
126{
127 struct ath_hw *ah = (struct ath_hw *)common->ah;
128 void *channels;
129
130 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
131 ARRAY_SIZE(ath9k_5ghz_chantable) !=
132 ATH9K_NUM_CHANNELS);
133
134 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
135 channels = devm_kzalloc(ah->dev,
136 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
137 if (!channels)
138 return -ENOMEM;
139
140 memcpy(channels, ath9k_2ghz_chantable,
141 sizeof(ath9k_2ghz_chantable));
142 common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
143 common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
144 common->sbands[IEEE80211_BAND_2GHZ].n_channels =
145 ARRAY_SIZE(ath9k_2ghz_chantable);
146 common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
147 common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
148 ARRAY_SIZE(ath9k_legacy_rates);
149 }
150
151 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
152 channels = devm_kzalloc(ah->dev,
153 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
154 if (!channels)
155 return -ENOMEM;
156
157 memcpy(channels, ath9k_5ghz_chantable,
158 sizeof(ath9k_5ghz_chantable));
159 common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
160 common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
161 common->sbands[IEEE80211_BAND_5GHZ].n_channels =
162 ARRAY_SIZE(ath9k_5ghz_chantable);
163 common->sbands[IEEE80211_BAND_5GHZ].bitrates =
164 ath9k_legacy_rates + 4;
165 common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
166 ARRAY_SIZE(ath9k_legacy_rates) - 4;
167 }
168 return 0;
169}
170EXPORT_SYMBOL(ath9k_cmn_init_channels_rates);
171
172void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
173 struct ieee80211_sta_ht_cap *ht_info)
174{
175 struct ath_common *common = ath9k_hw_common(ah);
176 u8 tx_streams, rx_streams;
177 int i, max_streams;
178
179 ht_info->ht_supported = true;
180 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
181 IEEE80211_HT_CAP_SM_PS |
182 IEEE80211_HT_CAP_SGI_40 |
183 IEEE80211_HT_CAP_DSSSCCK40;
184
185 if (ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
186 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
187
188 if (ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
189 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
190
191 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
192 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
193
194 if (AR_SREV_9271(ah) || AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
195 max_streams = 1;
196 else if (AR_SREV_9462(ah))
197 max_streams = 2;
198 else if (AR_SREV_9300_20_OR_LATER(ah))
199 max_streams = 3;
200 else
201 max_streams = 2;
202
203 if (AR_SREV_9280_20_OR_LATER(ah)) {
204 if (max_streams >= 2)
205 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
206 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
207 }
208
209 /* set up supported mcs set */
210 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
211 tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
212 rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
213
214 ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
215 tx_streams, rx_streams);
216
217 if (tx_streams != rx_streams) {
218 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
219 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
220 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
221 }
222
223 for (i = 0; i < rx_streams; i++)
224 ht_info->mcs.rx_mask[i] = 0xff;
225
226 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
227}
228EXPORT_SYMBOL(ath9k_cmn_setup_ht_cap);
229
230void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
231{
232 struct ath_common *common = ath9k_hw_common(ah);
233
234 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_HT))
235 return;
236
237 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
238 ath9k_cmn_setup_ht_cap(ah,
239 &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
240 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
241 ath9k_cmn_setup_ht_cap(ah,
242 &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
243}
244EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.h b/drivers/net/wireless/ath/ath9k/common-init.h
new file mode 100644
index 000000000000..ac03fca5ffdd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (c) 2009-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17int ath9k_cmn_init_channels_rates(struct ath_common *common);
18void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
19 struct ieee80211_sta_ht_cap *ht_info);
20void ath9k_cmn_reload_chainmask(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 768c733cad31..c6dd7f1fed65 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards."); 27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
28MODULE_LICENSE("Dual BSD/GPL"); 28MODULE_LICENSE("Dual BSD/GPL");
29 29
30/* Assumes you've already done the endian to CPU conversion */
31bool ath9k_cmn_rx_accept(struct ath_common *common,
32 struct ieee80211_hdr *hdr,
33 struct ieee80211_rx_status *rxs,
34 struct ath_rx_status *rx_stats,
35 bool *decrypt_error,
36 unsigned int rxfilter)
37{
38 struct ath_hw *ah = common->ah;
39 bool is_mc, is_valid_tkip, strip_mic, mic_error;
40 __le16 fc;
41
42 fc = hdr->frame_control;
43
44 is_mc = !!is_multicast_ether_addr(hdr->addr1);
45 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
46 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
47 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
48 ieee80211_has_protected(fc) &&
49 !(rx_stats->rs_status &
50 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
51 ATH9K_RXERR_KEYMISS));
52
53 /*
54 * Key miss events are only relevant for pairwise keys where the
55 * descriptor does contain a valid key index. This has been observed
56 * mostly with CCMP encryption.
57 */
58 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
59 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
60 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
61
62 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
63 !ieee80211_has_morefrags(fc) &&
64 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
65 (rx_stats->rs_status & ATH9K_RXERR_MIC);
66
67 /*
68 * The rx_stats->rs_status will not be set until the end of the
69 * chained descriptors so it can be ignored if rs_more is set. The
70 * rs_more will be false at the last element of the chained
71 * descriptors.
72 */
73 if (rx_stats->rs_status != 0) {
74 u8 status_mask;
75
76 if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
77 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
78 mic_error = false;
79 }
80
81 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
82 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
83 *decrypt_error = true;
84 mic_error = false;
85 }
86
87
88 /*
89 * Reject error frames with the exception of
90 * decryption and MIC failures. For monitor mode,
91 * we also ignore the CRC error.
92 */
93 status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
94 ATH9K_RXERR_KEYMISS;
95
96 if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
97 status_mask |= ATH9K_RXERR_CRC;
98
99 if (rx_stats->rs_status & ~status_mask)
100 return false;
101 }
102
103 /*
104 * For unicast frames the MIC error bit can have false positives,
105 * so all MIC error reports need to be validated in software.
106 * False negatives are not common, so skip software verification
107 * if the hardware considers the MIC valid.
108 */
109 if (strip_mic)
110 rxs->flag |= RX_FLAG_MMIC_STRIPPED;
111 else if (is_mc && mic_error)
112 rxs->flag |= RX_FLAG_MMIC_ERROR;
113
114 return true;
115}
116EXPORT_SYMBOL(ath9k_cmn_rx_accept);
117
118void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
119 struct sk_buff *skb,
120 struct ath_rx_status *rx_stats,
121 struct ieee80211_rx_status *rxs,
122 bool decrypt_error)
123{
124 struct ath_hw *ah = common->ah;
125 struct ieee80211_hdr *hdr;
126 int hdrlen, padpos, padsize;
127 u8 keyix;
128 __le16 fc;
129
130 /* see if any padding is done by the hw and remove it */
131 hdr = (struct ieee80211_hdr *) skb->data;
132 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
133 fc = hdr->frame_control;
134 padpos = ieee80211_hdrlen(fc);
135
136 /* The MAC header is padded to have 32-bit boundary if the
137 * packet payload is non-zero. The general calculation for
138 * padsize would take into account odd header lengths:
139 * padsize = (4 - padpos % 4) % 4; However, since only
140 * even-length headers are used, padding can only be 0 or 2
141 * bytes and we can optimize this a bit. In addition, we must
142 * not try to remove padding from short control frames that do
143 * not have payload. */
144 padsize = padpos & 3;
145 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
146 memmove(skb->data + padsize, skb->data, padpos);
147 skb_pull(skb, padsize);
148 }
149
150 keyix = rx_stats->rs_keyix;
151
152 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
153 ieee80211_has_protected(fc)) {
154 rxs->flag |= RX_FLAG_DECRYPTED;
155 } else if (ieee80211_has_protected(fc)
156 && !decrypt_error && skb->len >= hdrlen + 4) {
157 keyix = skb->data[hdrlen + 3] >> 6;
158
159 if (test_bit(keyix, common->keymap))
160 rxs->flag |= RX_FLAG_DECRYPTED;
161 }
162 if (ah->sw_mgmt_crypto &&
163 (rxs->flag & RX_FLAG_DECRYPTED) &&
164 ieee80211_is_mgmt(fc))
165 /* Use software decrypt for management frames. */
166 rxs->flag &= ~RX_FLAG_DECRYPTED;
167}
168EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
169
170int ath9k_cmn_process_rate(struct ath_common *common,
171 struct ieee80211_hw *hw,
172 struct ath_rx_status *rx_stats,
173 struct ieee80211_rx_status *rxs)
174{
175 struct ieee80211_supported_band *sband;
176 enum ieee80211_band band;
177 unsigned int i = 0;
178 struct ath_hw *ah = common->ah;
179
180 band = ah->curchan->chan->band;
181 sband = hw->wiphy->bands[band];
182
183 if (IS_CHAN_QUARTER_RATE(ah->curchan))
184 rxs->flag |= RX_FLAG_5MHZ;
185 else if (IS_CHAN_HALF_RATE(ah->curchan))
186 rxs->flag |= RX_FLAG_10MHZ;
187
188 if (rx_stats->rs_rate & 0x80) {
189 /* HT rate */
190 rxs->flag |= RX_FLAG_HT;
191 rxs->flag |= rx_stats->flag;
192 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
193 return 0;
194 }
195
196 for (i = 0; i < sband->n_bitrates; i++) {
197 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
198 rxs->rate_idx = i;
199 return 0;
200 }
201 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
202 rxs->flag |= RX_FLAG_SHORTPRE;
203 rxs->rate_idx = i;
204 return 0;
205 }
206 }
207
208 return -EINVAL;
209}
210EXPORT_SYMBOL(ath9k_cmn_process_rate);
211
212void ath9k_cmn_process_rssi(struct ath_common *common,
213 struct ieee80211_hw *hw,
214 struct ath_rx_status *rx_stats,
215 struct ieee80211_rx_status *rxs)
216{
217 struct ath_hw *ah = common->ah;
218 int last_rssi;
219 int rssi = rx_stats->rs_rssi;
220 int i, j;
221
222 /*
223 * RSSI is not available for subframes in an A-MPDU.
224 */
225 if (rx_stats->rs_moreaggr) {
226 rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
227 return;
228 }
229
230 /*
231 * Check if the RSSI for the last subframe in an A-MPDU
232 * or an unaggregated frame is valid.
233 */
234 if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
235 rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
236 return;
237 }
238
239 for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
240 s8 rssi;
241
242 if (!(ah->rxchainmask & BIT(i)))
243 continue;
244
245 rssi = rx_stats->rs_rssi_ctl[i];
246 if (rssi != ATH9K_RSSI_BAD) {
247 rxs->chains |= BIT(j);
248 rxs->chain_signal[j] = ah->noise + rssi;
249 }
250 j++;
251 }
252
253 /*
254 * Update Beacon RSSI, this is used by ANI.
255 */
256 if (rx_stats->is_mybeacon &&
257 ((ah->opmode == NL80211_IFTYPE_STATION) ||
258 (ah->opmode == NL80211_IFTYPE_ADHOC))) {
259 ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
260 last_rssi = common->last_rssi;
261
262 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
263 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
264 if (rssi < 0)
265 rssi = 0;
266
267 ah->stats.avgbrssi = rssi;
268 }
269
270 rxs->signal = ah->noise + rx_stats->rs_rssi;
271}
272EXPORT_SYMBOL(ath9k_cmn_process_rssi);
273
30int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb) 274int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
31{ 275{
32 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 276 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index eb85e1bdca88..ca38116838f0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -21,6 +21,9 @@
21#include "hw.h" 21#include "hw.h"
22#include "hw-ops.h" 22#include "hw-ops.h"
23 23
24#include "common-init.h"
25#include "common-beacon.h"
26
24/* Common header for Atheros 802.11n base driver cores */ 27/* Common header for Atheros 802.11n base driver cores */
25 28
26#define WME_BA_BMP_SIZE 64 29#define WME_BA_BMP_SIZE 64
@@ -42,6 +45,38 @@
42#define ATH_EP_RND(x, mul) \ 45#define ATH_EP_RND(x, mul) \
43 (((x) + ((mul)/2)) / (mul)) 46 (((x) + ((mul)/2)) / (mul))
44 47
48#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
49
50struct ath_beacon_config {
51 int beacon_interval;
52 u16 dtim_period;
53 u16 bmiss_timeout;
54 u8 dtim_count;
55 bool enable_beacon;
56 bool ibss_creator;
57 u32 nexttbtt;
58 u32 intval;
59};
60
61bool ath9k_cmn_rx_accept(struct ath_common *common,
62 struct ieee80211_hdr *hdr,
63 struct ieee80211_rx_status *rxs,
64 struct ath_rx_status *rx_stats,
65 bool *decrypt_error,
66 unsigned int rxfilter);
67void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
68 struct sk_buff *skb,
69 struct ath_rx_status *rx_stats,
70 struct ieee80211_rx_status *rxs,
71 bool decrypt_error);
72int ath9k_cmn_process_rate(struct ath_common *common,
73 struct ieee80211_hw *hw,
74 struct ath_rx_status *rx_stats,
75 struct ieee80211_rx_status *rxs);
76void ath9k_cmn_process_rssi(struct ath_common *common,
77 struct ieee80211_hw *hw,
78 struct ath_rx_status *rx_stats,
79 struct ieee80211_rx_status *rxs);
45int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 80int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
46struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw, 81struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
47 struct ath_hw *ah, 82 struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ab7264c1d8f7..780ff1bee6f6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -135,46 +135,45 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
135 struct ath_softc *sc = file->private_data; 135 struct ath_softc *sc = file->private_data;
136 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 136 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
137 struct ath_hw *ah = sc->sc_ah; 137 struct ath_hw *ah = sc->sc_ah;
138 unsigned int len = 0, size = 1024; 138 unsigned int len = 0;
139 const unsigned int size = 1024;
139 ssize_t retval = 0; 140 ssize_t retval = 0;
140 char *buf; 141 char *buf;
142 int i;
143 struct {
144 const char *name;
145 unsigned int val;
146 } ani_info[] = {
147 { "ANI RESET", ah->stats.ast_ani_reset },
148 { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
149 { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
150 { "SPUR UP", ah->stats.ast_ani_spurup },
151 { "SPUR DOWN", ah->stats.ast_ani_spurup },
152 { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
153 { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
154 { "MRC-CCK ON", ah->stats.ast_ani_ccklow },
155 { "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
156 { "FIR-STEP UP", ah->stats.ast_ani_stepup },
157 { "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
158 { "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
159 { "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
160 { "CCK ERRORS", ah->stats.ast_ani_cckerrs },
161 };
141 162
142 buf = kzalloc(size, GFP_KERNEL); 163 buf = kzalloc(size, GFP_KERNEL);
143 if (buf == NULL) 164 if (buf == NULL)
144 return -ENOMEM; 165 return -ENOMEM;
145 166
146 if (common->disable_ani) { 167 len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
147 len += scnprintf(buf + len, size - len, "%s: %s\n", 168 common->disable_ani ? "DISABLED" : "ENABLED");
148 "ANI", "DISABLED"); 169
170 if (common->disable_ani)
149 goto exit; 171 goto exit;
150 }
151 172
152 len += scnprintf(buf + len, size - len, "%15s: %s\n", 173 for (i = 0; i < ARRAY_SIZE(ani_info); i++)
153 "ANI", "ENABLED"); 174 len += scnprintf(buf + len, size - len, "%15s: %u\n",
154 len += scnprintf(buf + len, size - len, "%15s: %u\n", 175 ani_info[i].name, ani_info[i].val);
155 "ANI RESET", ah->stats.ast_ani_reset); 176
156 len += scnprintf(buf + len, size - len, "%15s: %u\n",
157 "SPUR UP", ah->stats.ast_ani_spurup);
158 len += scnprintf(buf + len, size - len, "%15s: %u\n",
159 "SPUR DOWN", ah->stats.ast_ani_spurup);
160 len += scnprintf(buf + len, size - len, "%15s: %u\n",
161 "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
162 len += scnprintf(buf + len, size - len, "%15s: %u\n",
163 "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
164 len += scnprintf(buf + len, size - len, "%15s: %u\n",
165 "MRC-CCK ON", ah->stats.ast_ani_ccklow);
166 len += scnprintf(buf + len, size - len, "%15s: %u\n",
167 "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
168 len += scnprintf(buf + len, size - len, "%15s: %u\n",
169 "FIR-STEP UP", ah->stats.ast_ani_stepup);
170 len += scnprintf(buf + len, size - len, "%15s: %u\n",
171 "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
172 len += scnprintf(buf + len, size - len, "%15s: %u\n",
173 "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
174 len += scnprintf(buf + len, size - len, "%15s: %u\n",
175 "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
176 len += scnprintf(buf + len, size - len, "%15s: %u\n",
177 "CCK ERRORS", ah->stats.ast_ani_cckerrs);
178exit: 177exit:
179 if (len > size) 178 if (len > size)
180 len = size; 179 len = size;
@@ -209,7 +208,7 @@ static ssize_t write_file_ani(struct file *file,
209 common->disable_ani = !ani; 208 common->disable_ani = !ani;
210 209
211 if (common->disable_ani) { 210 if (common->disable_ani) {
212 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags); 211 clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
213 ath_stop_ani(sc); 212 ath_stop_ani(sc);
214 } else { 213 } else {
215 ath_check_ani(sc); 214 ath_check_ani(sc);
@@ -307,13 +306,13 @@ static ssize_t read_file_antenna_diversity(struct file *file,
307 struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN]; 306 struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
308 struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT]; 307 struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
309 struct ath_hw_antcomb_conf div_ant_conf; 308 struct ath_hw_antcomb_conf div_ant_conf;
310 unsigned int len = 0, size = 1024; 309 unsigned int len = 0;
310 const unsigned int size = 1024;
311 ssize_t retval = 0; 311 ssize_t retval = 0;
312 char *buf; 312 char *buf;
313 char *lna_conf_str[4] = {"LNA1_MINUS_LNA2", 313 static const char *lna_conf_str[4] = {
314 "LNA2", 314 "LNA1_MINUS_LNA2", "LNA2", "LNA1", "LNA1_PLUS_LNA2"
315 "LNA1", 315 };
316 "LNA1_PLUS_LNA2"};
317 316
318 buf = kzalloc(size, GFP_KERNEL); 317 buf = kzalloc(size, GFP_KERNEL);
319 if (buf == NULL) 318 if (buf == NULL)
@@ -716,10 +715,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
716 struct ath_softc *sc = file->private_data; 715 struct ath_softc *sc = file->private_data;
717 struct ath_txq *txq; 716 struct ath_txq *txq;
718 char *buf; 717 char *buf;
719 unsigned int len = 0, size = 1024; 718 unsigned int len = 0;
719 const unsigned int size = 1024;
720 ssize_t retval = 0; 720 ssize_t retval = 0;
721 int i; 721 int i;
722 char *qname[4] = {"VO", "VI", "BE", "BK"}; 722 static const char *qname[4] = {
723 "VO", "VI", "BE", "BK"
724 };
723 725
724 buf = kzalloc(size, GFP_KERNEL); 726 buf = kzalloc(size, GFP_KERNEL);
725 if (buf == NULL) 727 if (buf == NULL)
@@ -866,6 +868,12 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
866 "%17s: %2d\n", "PLL RX Hang", 868 "%17s: %2d\n", "PLL RX Hang",
867 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]); 869 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
868 len += scnprintf(buf + len, sizeof(buf) - len, 870 len += scnprintf(buf + len, sizeof(buf) - len,
871 "%17s: %2d\n", "MAC Hang",
872 sc->debug.stats.reset[RESET_TYPE_MAC_HANG]);
873 len += scnprintf(buf + len, sizeof(buf) - len,
874 "%17s: %2d\n", "Stuck Beacon",
875 sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]);
876 len += scnprintf(buf + len, sizeof(buf) - len,
869 "%17s: %2d\n", "MCI Reset", 877 "%17s: %2d\n", "MCI Reset",
870 sc->debug.stats.reset[RESET_TYPE_MCI]); 878 sc->debug.stats.reset[RESET_TYPE_MCI]);
871 879
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cc7a025d833e..559a68c2709c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -18,7 +18,6 @@
18#define DEBUG_H 18#define DEBUG_H
19 19
20#include "hw.h" 20#include "hw.h"
21#include "rc.h"
22#include "dfs_debug.h" 21#include "dfs_debug.h"
23 22
24struct ath_txq; 23struct ath_txq;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 0a7ddf4c88c9..7936c9126a20 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -21,6 +21,8 @@
21 21
22#include "hw.h" 22#include "hw.h"
23 23
24struct ath_softc;
25
24/** 26/**
25 * struct ath_dfs_stats - DFS Statistics per wiphy 27 * struct ath_dfs_stats - DFS Statistics per wiphy
26 * @pulses_total: pulses reported by HW 28 * @pulses_total: pulses reported by HW
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6d5d716adc1b..8e7153b186ed 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
54 .driver_info = AR9280_USB }, /* SMC Networks */ 54 .driver_info = AR9280_USB }, /* SMC Networks */
55 { USB_DEVICE(0x0411, 0x017f), 55 { USB_DEVICE(0x0411, 0x017f),
56 .driver_info = AR9280_USB }, /* Sony UWA-BR100 */ 56 .driver_info = AR9280_USB }, /* Sony UWA-BR100 */
57 { USB_DEVICE(0x0411, 0x0197),
58 .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
57 { USB_DEVICE(0x04da, 0x3904), 59 { USB_DEVICE(0x04da, 0x3904),
58 .driver_info = AR9280_USB }, 60 .driver_info = AR9280_USB },
59 61
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 99a203174f45..dab1f0cab993 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -39,7 +39,6 @@
39#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 39#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
40 40
41#define ATH_DEFAULT_BMISS_LIMIT 10 41#define ATH_DEFAULT_BMISS_LIMIT 10
42#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
43#define TSF_TO_TU(_h, _l) \ 42#define TSF_TO_TU(_h, _l) \
44 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 43 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
45 44
@@ -277,7 +276,6 @@ struct ath9k_htc_rxbuf {
277}; 276};
278 277
279struct ath9k_htc_rx { 278struct ath9k_htc_rx {
280 int last_rssi; /* FIXME: per-STA */
281 struct list_head rxbuf; 279 struct list_head rxbuf;
282 spinlock_t rxbuflock; 280 spinlock_t rxbuflock;
283}; 281};
@@ -407,12 +405,18 @@ static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
407#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */ 405#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
408#define MIN_SWBA_RESPONSE 10 /* in TUs */ 406#define MIN_SWBA_RESPONSE 10 /* in TUs */
409 407
410struct htc_beacon_config { 408struct htc_beacon {
409 enum {
410 OK, /* no change needed */
411 UPDATE, /* update pending */
412 COMMIT /* beacon sent, commit change */
413 } updateslot; /* slot time update fsm */
414
411 struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF]; 415 struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
412 u16 beacon_interval; 416 u32 bmisscnt;
413 u16 dtim_period; 417 u32 beaconq;
414 u16 bmiss_timeout; 418 int slottime;
415 u32 bmiss_cnt; 419 int slotupdate;
416}; 420};
417 421
418struct ath_btcoex { 422struct ath_btcoex {
@@ -440,12 +444,8 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
440} 444}
441#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 445#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
442 446
443#define OP_INVALID BIT(0)
444#define OP_SCANNING BIT(1)
445#define OP_ENABLE_BEACON BIT(2)
446#define OP_BT_PRIORITY_DETECTED BIT(3) 447#define OP_BT_PRIORITY_DETECTED BIT(3)
447#define OP_BT_SCAN BIT(4) 448#define OP_BT_SCAN BIT(4)
448#define OP_ANI_RUNNING BIT(5)
449#define OP_TSF_RESET BIT(6) 449#define OP_TSF_RESET BIT(6)
450 450
451struct ath9k_htc_priv { 451struct ath9k_htc_priv {
@@ -488,10 +488,10 @@ struct ath9k_htc_priv {
488 unsigned long op_flags; 488 unsigned long op_flags;
489 489
490 struct ath9k_hw_cal_data caldata; 490 struct ath9k_hw_cal_data caldata;
491 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
492 491
493 spinlock_t beacon_lock; 492 spinlock_t beacon_lock;
494 struct htc_beacon_config cur_beacon_conf; 493 struct ath_beacon_config cur_beacon_conf;
494 struct htc_beacon beacon;
495 495
496 struct ath9k_htc_rx rx; 496 struct ath9k_htc_rx rx;
497 struct ath9k_htc_tx tx; 497 struct ath9k_htc_tx tx;
@@ -516,7 +516,6 @@ struct ath9k_htc_priv {
516 struct work_struct led_work; 516 struct work_struct led_work;
517#endif 517#endif
518 518
519 int beaconq;
520 int cabq; 519 int cabq;
521 int hwq_map[IEEE80211_NUM_ACS]; 520 int hwq_map[IEEE80211_NUM_ACS];
522 521
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 8b5757734596..e8b6ec3c1dbb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -26,7 +26,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
26 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 26 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
27 memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info)); 27 memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
28 28
29 ath9k_hw_get_txq_props(ah, priv->beaconq, &qi); 29 ath9k_hw_get_txq_props(ah, priv->beacon.beaconq, &qi);
30 30
31 if (priv->ah->opmode == NL80211_IFTYPE_AP || 31 if (priv->ah->opmode == NL80211_IFTYPE_AP ||
32 priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) { 32 priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
@@ -54,220 +54,78 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
54 54
55 } 55 }
56 56
57 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) { 57 if (!ath9k_hw_set_txq_props(ah, priv->beacon.beaconq, &qi)) {
58 ath_err(ath9k_hw_common(ah), 58 ath_err(ath9k_hw_common(ah),
59 "Unable to update beacon queue %u!\n", priv->beaconq); 59 "Unable to update beacon queue %u!\n", priv->beacon.beaconq);
60 } else { 60 } else {
61 ath9k_hw_resettxqueue(ah, priv->beaconq); 61 ath9k_hw_resettxqueue(ah, priv->beacon.beaconq);
62 } 62 }
63} 63}
64 64
65 65/*
66static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv, 66 * Both nexttbtt and intval have to be in usecs.
67 struct htc_beacon_config *bss_conf) 67 */
68static void ath9k_htc_beacon_init(struct ath9k_htc_priv *priv,
69 struct ath_beacon_config *conf,
70 bool reset_tsf)
68{ 71{
69 struct ath_common *common = ath9k_hw_common(priv->ah); 72 struct ath_hw *ah = priv->ah;
70 struct ath9k_beacon_state bs;
71 enum ath9k_int imask = 0;
72 int dtimperiod, dtimcount, sleepduration;
73 int bmiss_timeout;
74 u32 nexttbtt = 0, intval, tsftu;
75 __be32 htc_imask = 0;
76 u64 tsf;
77 int num_beacons, offset, dtim_dec_count;
78 int ret __attribute__ ((unused)); 73 int ret __attribute__ ((unused));
74 __be32 htc_imask = 0;
79 u8 cmd_rsp; 75 u8 cmd_rsp;
80 76
81 memset(&bs, 0, sizeof(bs)); 77 if (conf->intval >= TU_TO_USEC(DEFAULT_SWBA_RESPONSE))
82 78 ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
83 intval = bss_conf->beacon_interval; 79 else
84 bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval); 80 ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
85
86 /*
87 * Setup dtim parameters according to
88 * last beacon we received (which may be none).
89 */
90 dtimperiod = bss_conf->dtim_period;
91 if (dtimperiod <= 0) /* NB: 0 if not known */
92 dtimperiod = 1;
93 dtimcount = 1;
94 if (dtimcount >= dtimperiod) /* NB: sanity check */
95 dtimcount = 0;
96
97 sleepduration = intval;
98 if (sleepduration <= 0)
99 sleepduration = intval;
100
101 /*
102 * Pull nexttbtt forward to reflect the current
103 * TSF and calculate dtim state for the result.
104 */
105 tsf = ath9k_hw_gettsf64(priv->ah);
106 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
107
108 num_beacons = tsftu / intval + 1;
109 offset = tsftu % intval;
110 nexttbtt = tsftu - offset;
111 if (offset)
112 nexttbtt += intval;
113
114 /* DTIM Beacon every dtimperiod Beacon */
115 dtim_dec_count = num_beacons % dtimperiod;
116 dtimcount -= dtim_dec_count;
117 if (dtimcount < 0)
118 dtimcount += dtimperiod;
119
120 bs.bs_intval = TU_TO_USEC(intval);
121 bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
122 bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
123 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
124
125 /*
126 * Calculate the number of consecutive beacons to miss* before taking
127 * a BMISS interrupt. The configuration is specified in TU so we only
128 * need calculate based on the beacon interval. Note that we clamp the
129 * result to at most 15 beacons.
130 */
131 if (sleepduration > intval) {
132 bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
133 } else {
134 bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
135 if (bs.bs_bmissthreshold > 15)
136 bs.bs_bmissthreshold = 15;
137 else if (bs.bs_bmissthreshold <= 0)
138 bs.bs_bmissthreshold = 1;
139 }
140
141 /*
142 * Calculate sleep duration. The configuration is given in ms.
143 * We ensure a multiple of the beacon period is used. Also, if the sleep
144 * duration is greater than the DTIM period then it makes senses
145 * to make it a multiple of that.
146 *
147 * XXX fixed at 100ms
148 */
149
150 bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
151 sleepduration));
152 if (bs.bs_sleepduration > bs.bs_dtimperiod)
153 bs.bs_sleepduration = bs.bs_dtimperiod;
154
155 /* TSF out of range threshold fixed at 1 second */
156 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
157
158 ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
159 intval, tsf, tsftu);
160 ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
161 bs.bs_bmissthreshold, bs.bs_sleepduration);
162
163 /* Set the computed STA beacon timers */
164 81
165 WMI_CMD(WMI_DISABLE_INTR_CMDID); 82 WMI_CMD(WMI_DISABLE_INTR_CMDID);
166 ath9k_hw_set_sta_beacon_timers(priv->ah, &bs); 83 if (reset_tsf)
167 imask |= ATH9K_INT_BMISS; 84 ath9k_hw_reset_tsf(ah);
168 htc_imask = cpu_to_be32(imask); 85 ath9k_htc_beaconq_config(priv);
86 ath9k_hw_beaconinit(ah, conf->nexttbtt, conf->intval);
87 priv->beacon.bmisscnt = 0;
88 htc_imask = cpu_to_be32(ah->imask);
169 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); 89 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
170} 90}
171 91
172static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv, 92static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
173 struct htc_beacon_config *bss_conf) 93 struct ath_beacon_config *bss_conf)
174{ 94{
175 struct ath_common *common = ath9k_hw_common(priv->ah); 95 struct ath9k_beacon_state bs;
176 enum ath9k_int imask = 0; 96 enum ath9k_int imask = 0;
177 u32 nexttbtt, intval, tsftu;
178 __be32 htc_imask = 0; 97 __be32 htc_imask = 0;
179 int ret __attribute__ ((unused)); 98 int ret __attribute__ ((unused));
180 u8 cmd_rsp; 99 u8 cmd_rsp;
181 u64 tsf;
182 100
183 intval = bss_conf->beacon_interval; 101 if (ath9k_cmn_beacon_config_sta(priv->ah, bss_conf, &bs) == -EPERM)
184 intval /= ATH9K_HTC_MAX_BCN_VIF; 102 return;
185 nexttbtt = intval;
186
187 /*
188 * To reduce beacon misses under heavy TX load,
189 * set the beacon response time to a larger value.
190 */
191 if (intval > DEFAULT_SWBA_RESPONSE)
192 priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
193 else
194 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
195
196 if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
197 ath9k_hw_reset_tsf(priv->ah);
198 clear_bit(OP_TSF_RESET, &priv->op_flags);
199 } else {
200 /*
201 * Pull nexttbtt forward to reflect the current TSF.
202 */
203 tsf = ath9k_hw_gettsf64(priv->ah);
204 tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
205 do {
206 nexttbtt += intval;
207 } while (nexttbtt < tsftu);
208 }
209
210 if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
211 imask |= ATH9K_INT_SWBA;
212
213 ath_dbg(common, CONFIG,
214 "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
215 bss_conf->beacon_interval, nexttbtt,
216 priv->ah->config.sw_beacon_response_time, imask);
217
218 ath9k_htc_beaconq_config(priv);
219 103
220 WMI_CMD(WMI_DISABLE_INTR_CMDID); 104 WMI_CMD(WMI_DISABLE_INTR_CMDID);
221 ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval)); 105 ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
222 priv->cur_beacon_conf.bmiss_cnt = 0; 106 imask |= ATH9K_INT_BMISS;
223 htc_imask = cpu_to_be32(imask); 107 htc_imask = cpu_to_be32(imask);
224 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); 108 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
225} 109}
226 110
227static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv, 111static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
228 struct htc_beacon_config *bss_conf) 112 struct ath_beacon_config *conf)
229{ 113{
230 struct ath_common *common = ath9k_hw_common(priv->ah); 114 struct ath_hw *ah = priv->ah;
231 enum ath9k_int imask = 0; 115 ah->imask = 0;
232 u32 nexttbtt, intval, tsftu;
233 __be32 htc_imask = 0;
234 int ret __attribute__ ((unused));
235 u8 cmd_rsp;
236 u64 tsf;
237
238 intval = bss_conf->beacon_interval;
239 nexttbtt = intval;
240
241 /*
242 * Pull nexttbtt forward to reflect the current TSF.
243 */
244 tsf = ath9k_hw_gettsf64(priv->ah);
245 tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
246 do {
247 nexttbtt += intval;
248 } while (nexttbtt < tsftu);
249
250 /*
251 * Only one IBSS interfce is allowed.
252 */
253 if (intval > DEFAULT_SWBA_RESPONSE)
254 priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
255 else
256 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
257 116
258 if (test_bit(OP_ENABLE_BEACON, &priv->op_flags)) 117 ath9k_cmn_beacon_config_ap(ah, conf, ATH9K_HTC_MAX_BCN_VIF);
259 imask |= ATH9K_INT_SWBA; 118 ath9k_htc_beacon_init(priv, conf, false);
119}
260 120
261 ath_dbg(common, CONFIG, 121static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
262 "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n", 122 struct ath_beacon_config *conf)
263 bss_conf->beacon_interval, nexttbtt, 123{
264 priv->ah->config.sw_beacon_response_time, imask); 124 struct ath_hw *ah = priv->ah;
125 ah->imask = 0;
265 126
266 WMI_CMD(WMI_DISABLE_INTR_CMDID); 127 ath9k_cmn_beacon_config_adhoc(ah, conf);
267 ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval)); 128 ath9k_htc_beacon_init(priv, conf, conf->ibss_creator);
268 priv->cur_beacon_conf.bmiss_cnt = 0;
269 htc_imask = cpu_to_be32(imask);
270 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
271} 129}
272 130
273void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, 131void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
@@ -287,7 +145,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
287 145
288 spin_lock_bh(&priv->beacon_lock); 146 spin_lock_bh(&priv->beacon_lock);
289 147
290 vif = priv->cur_beacon_conf.bslot[slot]; 148 vif = priv->beacon.bslot[slot];
291 149
292 skb = ieee80211_get_buffered_bc(priv->hw, vif); 150 skb = ieee80211_get_buffered_bc(priv->hw, vif);
293 151
@@ -348,10 +206,10 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
348 206
349 spin_lock_bh(&priv->beacon_lock); 207 spin_lock_bh(&priv->beacon_lock);
350 208
351 vif = priv->cur_beacon_conf.bslot[slot]; 209 vif = priv->beacon.bslot[slot];
352 avp = (struct ath9k_htc_vif *)vif->drv_priv; 210 avp = (struct ath9k_htc_vif *)vif->drv_priv;
353 211
354 if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) { 212 if (unlikely(test_bit(ATH_OP_SCANNING, &common->op_flags))) {
355 spin_unlock_bh(&priv->beacon_lock); 213 spin_unlock_bh(&priv->beacon_lock);
356 return; 214 return;
357 } 215 }
@@ -431,8 +289,8 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
431 int slot; 289 int slot;
432 290
433 if (swba->beacon_pending != 0) { 291 if (swba->beacon_pending != 0) {
434 priv->cur_beacon_conf.bmiss_cnt++; 292 priv->beacon.bmisscnt++;
435 if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) { 293 if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
436 ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n"); 294 ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
437 ieee80211_queue_work(priv->hw, 295 ieee80211_queue_work(priv->hw,
438 &priv->fatal_work); 296 &priv->fatal_work);
@@ -440,16 +298,16 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
440 return; 298 return;
441 } 299 }
442 300
443 if (priv->cur_beacon_conf.bmiss_cnt) { 301 if (priv->beacon.bmisscnt) {
444 ath_dbg(common, BSTUCK, 302 ath_dbg(common, BSTUCK,
445 "Resuming beacon xmit after %u misses\n", 303 "Resuming beacon xmit after %u misses\n",
446 priv->cur_beacon_conf.bmiss_cnt); 304 priv->beacon.bmisscnt);
447 priv->cur_beacon_conf.bmiss_cnt = 0; 305 priv->beacon.bmisscnt = 0;
448 } 306 }
449 307
450 slot = ath9k_htc_choose_bslot(priv, swba); 308 slot = ath9k_htc_choose_bslot(priv, swba);
451 spin_lock_bh(&priv->beacon_lock); 309 spin_lock_bh(&priv->beacon_lock);
452 if (priv->cur_beacon_conf.bslot[slot] == NULL) { 310 if (priv->beacon.bslot[slot] == NULL) {
453 spin_unlock_bh(&priv->beacon_lock); 311 spin_unlock_bh(&priv->beacon_lock);
454 return; 312 return;
455 } 313 }
@@ -468,13 +326,13 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
468 326
469 spin_lock_bh(&priv->beacon_lock); 327 spin_lock_bh(&priv->beacon_lock);
470 for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) { 328 for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
471 if (priv->cur_beacon_conf.bslot[i] == NULL) { 329 if (priv->beacon.bslot[i] == NULL) {
472 avp->bslot = i; 330 avp->bslot = i;
473 break; 331 break;
474 } 332 }
475 } 333 }
476 334
477 priv->cur_beacon_conf.bslot[avp->bslot] = vif; 335 priv->beacon.bslot[avp->bslot] = vif;
478 spin_unlock_bh(&priv->beacon_lock); 336 spin_unlock_bh(&priv->beacon_lock);
479 337
480 ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n", 338 ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
@@ -488,7 +346,7 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
488 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv; 346 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
489 347
490 spin_lock_bh(&priv->beacon_lock); 348 spin_lock_bh(&priv->beacon_lock);
491 priv->cur_beacon_conf.bslot[avp->bslot] = NULL; 349 priv->beacon.bslot[avp->bslot] = NULL;
492 spin_unlock_bh(&priv->beacon_lock); 350 spin_unlock_bh(&priv->beacon_lock);
493 351
494 ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n", 352 ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
@@ -504,7 +362,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
504{ 362{
505 struct ath_common *common = ath9k_hw_common(priv->ah); 363 struct ath_common *common = ath9k_hw_common(priv->ah);
506 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv; 364 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
507 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 365 struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
508 u64 tsfadjust; 366 u64 tsfadjust;
509 367
510 if (avp->bslot == 0) 368 if (avp->bslot == 0)
@@ -536,7 +394,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
536 struct ieee80211_vif *vif) 394 struct ieee80211_vif *vif)
537{ 395{
538 struct ath_common *common = ath9k_hw_common(priv->ah); 396 struct ath_common *common = ath9k_hw_common(priv->ah);
539 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 397 struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
540 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 398 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
541 bool beacon_configured; 399 bool beacon_configured;
542 400
@@ -591,7 +449,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
591 struct ieee80211_vif *vif) 449 struct ieee80211_vif *vif)
592{ 450{
593 struct ath_common *common = ath9k_hw_common(priv->ah); 451 struct ath_common *common = ath9k_hw_common(priv->ah);
594 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 452 struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
595 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 453 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
596 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; 454 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
597 455
@@ -627,7 +485,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
627void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv) 485void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
628{ 486{
629 struct ath_common *common = ath9k_hw_common(priv->ah); 487 struct ath_common *common = ath9k_hw_common(priv->ah);
630 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 488 struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
631 489
632 switch (priv->ah->opmode) { 490 switch (priv->ah->opmode) {
633 case NL80211_IFTYPE_STATION: 491 case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c57d6b859c04..8a3bd5fe3a54 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -38,93 +38,6 @@ static int ath9k_ps_enable;
38module_param_named(ps_enable, ath9k_ps_enable, int, 0444); 38module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
39MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); 39MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
40 40
41#define CHAN2G(_freq, _idx) { \
42 .center_freq = (_freq), \
43 .hw_value = (_idx), \
44 .max_power = 20, \
45}
46
47#define CHAN5G(_freq, _idx) { \
48 .band = IEEE80211_BAND_5GHZ, \
49 .center_freq = (_freq), \
50 .hw_value = (_idx), \
51 .max_power = 20, \
52}
53
54static struct ieee80211_channel ath9k_2ghz_channels[] = {
55 CHAN2G(2412, 0), /* Channel 1 */
56 CHAN2G(2417, 1), /* Channel 2 */
57 CHAN2G(2422, 2), /* Channel 3 */
58 CHAN2G(2427, 3), /* Channel 4 */
59 CHAN2G(2432, 4), /* Channel 5 */
60 CHAN2G(2437, 5), /* Channel 6 */
61 CHAN2G(2442, 6), /* Channel 7 */
62 CHAN2G(2447, 7), /* Channel 8 */
63 CHAN2G(2452, 8), /* Channel 9 */
64 CHAN2G(2457, 9), /* Channel 10 */
65 CHAN2G(2462, 10), /* Channel 11 */
66 CHAN2G(2467, 11), /* Channel 12 */
67 CHAN2G(2472, 12), /* Channel 13 */
68 CHAN2G(2484, 13), /* Channel 14 */
69};
70
71static struct ieee80211_channel ath9k_5ghz_channels[] = {
72 /* _We_ call this UNII 1 */
73 CHAN5G(5180, 14), /* Channel 36 */
74 CHAN5G(5200, 15), /* Channel 40 */
75 CHAN5G(5220, 16), /* Channel 44 */
76 CHAN5G(5240, 17), /* Channel 48 */
77 /* _We_ call this UNII 2 */
78 CHAN5G(5260, 18), /* Channel 52 */
79 CHAN5G(5280, 19), /* Channel 56 */
80 CHAN5G(5300, 20), /* Channel 60 */
81 CHAN5G(5320, 21), /* Channel 64 */
82 /* _We_ call this "Middle band" */
83 CHAN5G(5500, 22), /* Channel 100 */
84 CHAN5G(5520, 23), /* Channel 104 */
85 CHAN5G(5540, 24), /* Channel 108 */
86 CHAN5G(5560, 25), /* Channel 112 */
87 CHAN5G(5580, 26), /* Channel 116 */
88 CHAN5G(5600, 27), /* Channel 120 */
89 CHAN5G(5620, 28), /* Channel 124 */
90 CHAN5G(5640, 29), /* Channel 128 */
91 CHAN5G(5660, 30), /* Channel 132 */
92 CHAN5G(5680, 31), /* Channel 136 */
93 CHAN5G(5700, 32), /* Channel 140 */
94 /* _We_ call this UNII 3 */
95 CHAN5G(5745, 33), /* Channel 149 */
96 CHAN5G(5765, 34), /* Channel 153 */
97 CHAN5G(5785, 35), /* Channel 157 */
98 CHAN5G(5805, 36), /* Channel 161 */
99 CHAN5G(5825, 37), /* Channel 165 */
100};
101
102/* Atheros hardware rate code addition for short premble */
103#define SHPCHECK(__hw_rate, __flags) \
104 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
105
106#define RATE(_bitrate, _hw_rate, _flags) { \
107 .bitrate = (_bitrate), \
108 .flags = (_flags), \
109 .hw_value = (_hw_rate), \
110 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
111}
112
113static struct ieee80211_rate ath9k_legacy_rates[] = {
114 RATE(10, 0x1b, 0),
115 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp : 0x1e */
116 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
117 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short: 0x1c */
118 RATE(60, 0x0b, 0),
119 RATE(90, 0x0f, 0),
120 RATE(120, 0x0a, 0),
121 RATE(180, 0x0e, 0),
122 RATE(240, 0x09, 0),
123 RATE(360, 0x0d, 0),
124 RATE(480, 0x08, 0),
125 RATE(540, 0x0c, 0),
126};
127
128#ifdef CONFIG_MAC80211_LEDS 41#ifdef CONFIG_MAC80211_LEDS
129static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = { 42static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
130 { .throughput = 0 * 1024, .blink_time = 334 }, 43 { .throughput = 0 * 1024, .blink_time = 334 },
@@ -343,6 +256,25 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
343 } 256 }
344} 257}
345 258
259static void ath9k_regwrite_multi(struct ath_common *common)
260{
261 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
262 u32 rsp_status;
263 int r;
264
265 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
266 (u8 *) &priv->wmi->multi_write,
267 sizeof(struct register_write) * priv->wmi->multi_write_idx,
268 (u8 *) &rsp_status, sizeof(rsp_status),
269 100);
270 if (unlikely(r)) {
271 ath_dbg(common, WMI,
272 "REGISTER WRITE FAILED, multi len: %d\n",
273 priv->wmi->multi_write_idx);
274 }
275 priv->wmi->multi_write_idx = 0;
276}
277
346static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) 278static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
347{ 279{
348 struct ath_hw *ah = (struct ath_hw *) hw_priv; 280 struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -369,8 +301,6 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
369 struct ath_hw *ah = (struct ath_hw *) hw_priv; 301 struct ath_hw *ah = (struct ath_hw *) hw_priv;
370 struct ath_common *common = ath9k_hw_common(ah); 302 struct ath_common *common = ath9k_hw_common(ah);
371 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 303 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
372 u32 rsp_status;
373 int r;
374 304
375 mutex_lock(&priv->wmi->multi_write_mutex); 305 mutex_lock(&priv->wmi->multi_write_mutex);
376 306
@@ -383,19 +313,8 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
383 priv->wmi->multi_write_idx++; 313 priv->wmi->multi_write_idx++;
384 314
385 /* If the buffer is full, send it out. */ 315 /* If the buffer is full, send it out. */
386 if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) { 316 if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER)
387 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID, 317 ath9k_regwrite_multi(common);
388 (u8 *) &priv->wmi->multi_write,
389 sizeof(struct register_write) * priv->wmi->multi_write_idx,
390 (u8 *) &rsp_status, sizeof(rsp_status),
391 100);
392 if (unlikely(r)) {
393 ath_dbg(common, WMI,
394 "REGISTER WRITE FAILED, multi len: %d\n",
395 priv->wmi->multi_write_idx);
396 }
397 priv->wmi->multi_write_idx = 0;
398 }
399 318
400 mutex_unlock(&priv->wmi->multi_write_mutex); 319 mutex_unlock(&priv->wmi->multi_write_mutex);
401} 320}
@@ -426,26 +345,13 @@ static void ath9k_regwrite_flush(void *hw_priv)
426 struct ath_hw *ah = (struct ath_hw *) hw_priv; 345 struct ath_hw *ah = (struct ath_hw *) hw_priv;
427 struct ath_common *common = ath9k_hw_common(ah); 346 struct ath_common *common = ath9k_hw_common(ah);
428 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 347 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
429 u32 rsp_status;
430 int r;
431 348
432 atomic_dec(&priv->wmi->mwrite_cnt); 349 atomic_dec(&priv->wmi->mwrite_cnt);
433 350
434 mutex_lock(&priv->wmi->multi_write_mutex); 351 mutex_lock(&priv->wmi->multi_write_mutex);
435 352
436 if (priv->wmi->multi_write_idx) { 353 if (priv->wmi->multi_write_idx)
437 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID, 354 ath9k_regwrite_multi(common);
438 (u8 *) &priv->wmi->multi_write,
439 sizeof(struct register_write) * priv->wmi->multi_write_idx,
440 (u8 *) &rsp_status, sizeof(rsp_status),
441 100);
442 if (unlikely(r)) {
443 ath_dbg(common, WMI,
444 "REGISTER WRITE FAILED, multi len: %d\n",
445 priv->wmi->multi_write_idx);
446 }
447 priv->wmi->multi_write_idx = 0;
448 }
449 355
450 mutex_unlock(&priv->wmi->multi_write_mutex); 356 mutex_unlock(&priv->wmi->multi_write_mutex);
451} 357}
@@ -491,51 +397,6 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = {
491 .eeprom_read = ath_usb_eeprom_read, 397 .eeprom_read = ath_usb_eeprom_read,
492}; 398};
493 399
494static void setup_ht_cap(struct ath9k_htc_priv *priv,
495 struct ieee80211_sta_ht_cap *ht_info)
496{
497 struct ath_common *common = ath9k_hw_common(priv->ah);
498 u8 tx_streams, rx_streams;
499 int i;
500
501 ht_info->ht_supported = true;
502 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
503 IEEE80211_HT_CAP_SM_PS |
504 IEEE80211_HT_CAP_SGI_40 |
505 IEEE80211_HT_CAP_DSSSCCK40;
506
507 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
508 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
509
510 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
511
512 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
513 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
514
515 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
516
517 /* ath9k_htc supports only 1 or 2 stream devices */
518 tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
519 rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
520
521 ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
522 tx_streams, rx_streams);
523
524 if (tx_streams >= 2)
525 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
526
527 if (tx_streams != rx_streams) {
528 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
529 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
530 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
531 }
532
533 for (i = 0; i < rx_streams; i++)
534 ht_info->mcs.rx_mask[i] = 0xff;
535
536 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
537}
538
539static int ath9k_init_queues(struct ath9k_htc_priv *priv) 400static int ath9k_init_queues(struct ath9k_htc_priv *priv)
540{ 401{
541 struct ath_common *common = ath9k_hw_common(priv->ah); 402 struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -544,8 +405,8 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
544 for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++) 405 for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
545 priv->hwq_map[i] = -1; 406 priv->hwq_map[i] = -1;
546 407
547 priv->beaconq = ath9k_hw_beaconq_setup(priv->ah); 408 priv->beacon.beaconq = ath9k_hw_beaconq_setup(priv->ah);
548 if (priv->beaconq == -1) { 409 if (priv->beacon.beaconq == -1) {
549 ath_err(common, "Unable to setup BEACON xmit queue\n"); 410 ath_err(common, "Unable to setup BEACON xmit queue\n");
550 goto err; 411 goto err;
551 } 412 }
@@ -580,37 +441,13 @@ err:
580 return -EINVAL; 441 return -EINVAL;
581} 442}
582 443
583static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
584{
585 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
586 priv->sbands[IEEE80211_BAND_2GHZ].channels =
587 ath9k_2ghz_channels;
588 priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
589 priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
590 ARRAY_SIZE(ath9k_2ghz_channels);
591 priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
592 priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
593 ARRAY_SIZE(ath9k_legacy_rates);
594 }
595
596 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
597 priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
598 priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
599 priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
600 ARRAY_SIZE(ath9k_5ghz_channels);
601 priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
602 ath9k_legacy_rates + 4;
603 priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
604 ARRAY_SIZE(ath9k_legacy_rates) - 4;
605 }
606}
607
608static void ath9k_init_misc(struct ath9k_htc_priv *priv) 444static void ath9k_init_misc(struct ath9k_htc_priv *priv)
609{ 445{
610 struct ath_common *common = ath9k_hw_common(priv->ah); 446 struct ath_common *common = ath9k_hw_common(priv->ah);
611 447
612 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); 448 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
613 449
450 common->last_rssi = ATH_RSSI_DUMMY_MARKER;
614 priv->ah->opmode = NL80211_IFTYPE_STATION; 451 priv->ah->opmode = NL80211_IFTYPE_STATION;
615} 452}
616 453
@@ -622,12 +459,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
622 struct ath_common *common; 459 struct ath_common *common;
623 int i, ret = 0, csz = 0; 460 int i, ret = 0, csz = 0;
624 461
625 set_bit(OP_INVALID, &priv->op_flags);
626
627 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); 462 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
628 if (!ah) 463 if (!ah)
629 return -ENOMEM; 464 return -ENOMEM;
630 465
466 ah->dev = priv->dev;
631 ah->hw_version.devid = devid; 467 ah->hw_version.devid = devid;
632 ah->hw_version.usbdev = drv_info; 468 ah->hw_version.usbdev = drv_info;
633 ah->ah_flags |= AH_USE_EEPROM; 469 ah->ah_flags |= AH_USE_EEPROM;
@@ -647,6 +483,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
647 common->priv = priv; 483 common->priv = priv;
648 common->debug_mask = ath9k_debug; 484 common->debug_mask = ath9k_debug;
649 common->btcoex_enabled = ath9k_htc_btcoex_enable == 1; 485 common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
486 set_bit(ATH_OP_INVALID, &common->op_flags);
650 487
651 spin_lock_init(&priv->beacon_lock); 488 spin_lock_init(&priv->beacon_lock);
652 spin_lock_init(&priv->tx.tx_lock); 489 spin_lock_init(&priv->tx.tx_lock);
@@ -682,10 +519,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
682 goto err_queues; 519 goto err_queues;
683 520
684 for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) 521 for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
685 priv->cur_beacon_conf.bslot[i] = NULL; 522 priv->beacon.bslot[i] = NULL;
523 priv->beacon.slottime = ATH9K_SLOT_TIME_9;
686 524
525 ath9k_cmn_init_channels_rates(common);
687 ath9k_cmn_init_crypto(ah); 526 ath9k_cmn_init_crypto(ah);
688 ath9k_init_channels_rates(priv);
689 ath9k_init_misc(priv); 527 ath9k_init_misc(priv);
690 ath9k_htc_init_btcoex(priv, product); 528 ath9k_htc_init_btcoex(priv, product);
691 529
@@ -721,6 +559,7 @@ static const struct ieee80211_iface_combination if_comb = {
721static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, 559static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
722 struct ieee80211_hw *hw) 560 struct ieee80211_hw *hw)
723{ 561{
562 struct ath_hw *ah = priv->ah;
724 struct ath_common *common = ath9k_hw_common(priv->ah); 563 struct ath_common *common = ath9k_hw_common(priv->ah);
725 struct base_eep_header *pBase; 564 struct base_eep_header *pBase;
726 565
@@ -765,19 +604,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
765 604
766 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 605 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
767 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 606 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
768 &priv->sbands[IEEE80211_BAND_2GHZ]; 607 &common->sbands[IEEE80211_BAND_2GHZ];
769 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 608 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
770 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 609 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
771 &priv->sbands[IEEE80211_BAND_5GHZ]; 610 &common->sbands[IEEE80211_BAND_5GHZ];
772 611
773 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 612 ath9k_cmn_reload_chainmask(ah);
774 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
775 setup_ht_cap(priv,
776 &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
777 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
778 setup_ht_cap(priv,
779 &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
780 }
781 613
782 pBase = ath9k_htc_get_eeprom_base(priv); 614 pBase = ath9k_htc_get_eeprom_base(priv);
783 if (pBase) { 615 if (pBase) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c9254a61ca52..f46cd0250e48 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -250,7 +250,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
250 u8 cmd_rsp; 250 u8 cmd_rsp;
251 int ret; 251 int ret;
252 252
253 if (test_bit(OP_INVALID, &priv->op_flags)) 253 if (test_bit(ATH_OP_INVALID, &common->op_flags))
254 return -EIO; 254 return -EIO;
255 255
256 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL); 256 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +304,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
304 304
305 htc_start(priv->htc); 305 htc_start(priv->htc);
306 306
307 if (!test_bit(OP_SCANNING, &priv->op_flags) && 307 if (!test_bit(ATH_OP_SCANNING, &common->op_flags) &&
308 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) 308 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
309 ath9k_htc_vif_reconfig(priv); 309 ath9k_htc_vif_reconfig(priv);
310 310
@@ -748,7 +748,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
748 common->ani.shortcal_timer = timestamp; 748 common->ani.shortcal_timer = timestamp;
749 common->ani.checkani_timer = timestamp; 749 common->ani.checkani_timer = timestamp;
750 750
751 set_bit(OP_ANI_RUNNING, &priv->op_flags); 751 set_bit(ATH_OP_ANI_RUN, &common->op_flags);
752 752
753 ieee80211_queue_delayed_work(common->hw, &priv->ani_work, 753 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
754 msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 754 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -756,8 +756,9 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
756 756
757void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv) 757void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
758{ 758{
759 struct ath_common *common = ath9k_hw_common(priv->ah);
759 cancel_delayed_work_sync(&priv->ani_work); 760 cancel_delayed_work_sync(&priv->ani_work);
760 clear_bit(OP_ANI_RUNNING, &priv->op_flags); 761 clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
761} 762}
762 763
763void ath9k_htc_ani_work(struct work_struct *work) 764void ath9k_htc_ani_work(struct work_struct *work)
@@ -942,7 +943,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
942 ath_dbg(common, CONFIG, 943 ath_dbg(common, CONFIG,
943 "Failed to update capability in target\n"); 944 "Failed to update capability in target\n");
944 945
945 clear_bit(OP_INVALID, &priv->op_flags); 946 clear_bit(ATH_OP_INVALID, &common->op_flags);
946 htc_start(priv->htc); 947 htc_start(priv->htc);
947 948
948 spin_lock_bh(&priv->tx.tx_lock); 949 spin_lock_bh(&priv->tx.tx_lock);
@@ -971,7 +972,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
971 972
972 mutex_lock(&priv->mutex); 973 mutex_lock(&priv->mutex);
973 974
974 if (test_bit(OP_INVALID, &priv->op_flags)) { 975 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
975 ath_dbg(common, ANY, "Device not present\n"); 976 ath_dbg(common, ANY, "Device not present\n");
976 mutex_unlock(&priv->mutex); 977 mutex_unlock(&priv->mutex);
977 return; 978 return;
@@ -1013,7 +1014,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1013 ath9k_htc_ps_restore(priv); 1014 ath9k_htc_ps_restore(priv);
1014 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); 1015 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1015 1016
1016 set_bit(OP_INVALID, &priv->op_flags); 1017 set_bit(ATH_OP_INVALID, &common->op_flags);
1017 1018
1018 ath_dbg(common, CONFIG, "Driver halt\n"); 1019 ath_dbg(common, CONFIG, "Driver halt\n");
1019 mutex_unlock(&priv->mutex); 1020 mutex_unlock(&priv->mutex);
@@ -1087,7 +1088,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1087 ath9k_htc_set_opmode(priv); 1088 ath9k_htc_set_opmode(priv);
1088 1089
1089 if ((priv->ah->opmode == NL80211_IFTYPE_AP) && 1090 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1090 !test_bit(OP_ANI_RUNNING, &priv->op_flags)) { 1091 !test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
1091 ath9k_hw_set_tsfadjust(priv->ah, true); 1092 ath9k_hw_set_tsfadjust(priv->ah, true);
1092 ath9k_htc_start_ani(priv); 1093 ath9k_htc_start_ani(priv);
1093 } 1094 }
@@ -1245,13 +1246,14 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1245 u64 multicast) 1246 u64 multicast)
1246{ 1247{
1247 struct ath9k_htc_priv *priv = hw->priv; 1248 struct ath9k_htc_priv *priv = hw->priv;
1249 struct ath_common *common = ath9k_hw_common(priv->ah);
1248 u32 rfilt; 1250 u32 rfilt;
1249 1251
1250 mutex_lock(&priv->mutex); 1252 mutex_lock(&priv->mutex);
1251 changed_flags &= SUPPORTED_FILTERS; 1253 changed_flags &= SUPPORTED_FILTERS;
1252 *total_flags &= SUPPORTED_FILTERS; 1254 *total_flags &= SUPPORTED_FILTERS;
1253 1255
1254 if (test_bit(OP_INVALID, &priv->op_flags)) { 1256 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
1255 ath_dbg(ath9k_hw_common(priv->ah), ANY, 1257 ath_dbg(ath9k_hw_common(priv->ah), ANY,
1256 "Unable to configure filter on invalid state\n"); 1258 "Unable to configure filter on invalid state\n");
1257 mutex_unlock(&priv->mutex); 1259 mutex_unlock(&priv->mutex);
@@ -1474,7 +1476,9 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1474 1476
1475 if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) { 1477 if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
1476 common->curaid = bss_conf->aid; 1478 common->curaid = bss_conf->aid;
1479 common->last_rssi = ATH_RSSI_DUMMY_MARKER;
1477 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1480 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1481 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1478 } 1482 }
1479} 1483}
1480 1484
@@ -1496,6 +1500,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1496 struct ath9k_htc_priv *priv = hw->priv; 1500 struct ath9k_htc_priv *priv = hw->priv;
1497 struct ath_hw *ah = priv->ah; 1501 struct ath_hw *ah = priv->ah;
1498 struct ath_common *common = ath9k_hw_common(ah); 1502 struct ath_common *common = ath9k_hw_common(ah);
1503 int slottime;
1499 1504
1500 mutex_lock(&priv->mutex); 1505 mutex_lock(&priv->mutex);
1501 ath9k_htc_ps_wakeup(priv); 1506 ath9k_htc_ps_wakeup(priv);
@@ -1507,6 +1512,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1507 bss_conf->assoc ? 1512 bss_conf->assoc ?
1508 priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--; 1513 priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
1509 1514
1515 if (!bss_conf->assoc)
1516 clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1517
1510 if (priv->ah->opmode == NL80211_IFTYPE_STATION) { 1518 if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
1511 ath9k_htc_choose_set_bssid(priv); 1519 ath9k_htc_choose_set_bssid(priv);
1512 if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1)) 1520 if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
@@ -1528,7 +1536,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1528 ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n", 1536 ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
1529 bss_conf->bssid); 1537 bss_conf->bssid);
1530 ath9k_htc_set_tsfadjust(priv, vif); 1538 ath9k_htc_set_tsfadjust(priv, vif);
1531 set_bit(OP_ENABLE_BEACON, &priv->op_flags); 1539 priv->cur_beacon_conf.enable_beacon = 1;
1532 ath9k_htc_beacon_config(priv, vif); 1540 ath9k_htc_beacon_config(priv, vif);
1533 } 1541 }
1534 1542
@@ -1542,7 +1550,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1542 ath_dbg(common, CONFIG, 1550 ath_dbg(common, CONFIG,
1543 "Beacon disabled for BSS: %pM\n", 1551 "Beacon disabled for BSS: %pM\n",
1544 bss_conf->bssid); 1552 bss_conf->bssid);
1545 clear_bit(OP_ENABLE_BEACON, &priv->op_flags); 1553 priv->cur_beacon_conf.enable_beacon = 0;
1546 ath9k_htc_beacon_config(priv, vif); 1554 ath9k_htc_beacon_config(priv, vif);
1547 } 1555 }
1548 } 1556 }
@@ -1568,11 +1576,21 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1568 1576
1569 if (changed & BSS_CHANGED_ERP_SLOT) { 1577 if (changed & BSS_CHANGED_ERP_SLOT) {
1570 if (bss_conf->use_short_slot) 1578 if (bss_conf->use_short_slot)
1571 ah->slottime = 9; 1579 slottime = 9;
1572 else 1580 else
1573 ah->slottime = 20; 1581 slottime = 20;
1574 1582 if (vif->type == NL80211_IFTYPE_AP) {
1575 ath9k_hw_init_global_settings(ah); 1583 /*
1584 * Defer update, so that connected stations can adjust
1585 * their settings at the same time.
1586 * See beacon.c for more details
1587 */
1588 priv->beacon.slottime = slottime;
1589 priv->beacon.updateslot = UPDATE;
1590 } else {
1591 ah->slottime = slottime;
1592 ath9k_hw_init_global_settings(ah);
1593 }
1576 } 1594 }
1577 1595
1578 if (changed & BSS_CHANGED_HT) 1596 if (changed & BSS_CHANGED_HT)
@@ -1669,10 +1687,11 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1669static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) 1687static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1670{ 1688{
1671 struct ath9k_htc_priv *priv = hw->priv; 1689 struct ath9k_htc_priv *priv = hw->priv;
1690 struct ath_common *common = ath9k_hw_common(priv->ah);
1672 1691
1673 mutex_lock(&priv->mutex); 1692 mutex_lock(&priv->mutex);
1674 spin_lock_bh(&priv->beacon_lock); 1693 spin_lock_bh(&priv->beacon_lock);
1675 set_bit(OP_SCANNING, &priv->op_flags); 1694 set_bit(ATH_OP_SCANNING, &common->op_flags);
1676 spin_unlock_bh(&priv->beacon_lock); 1695 spin_unlock_bh(&priv->beacon_lock);
1677 cancel_work_sync(&priv->ps_work); 1696 cancel_work_sync(&priv->ps_work);
1678 ath9k_htc_stop_ani(priv); 1697 ath9k_htc_stop_ani(priv);
@@ -1682,10 +1701,11 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1682static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw) 1701static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1683{ 1702{
1684 struct ath9k_htc_priv *priv = hw->priv; 1703 struct ath9k_htc_priv *priv = hw->priv;
1704 struct ath_common *common = ath9k_hw_common(priv->ah);
1685 1705
1686 mutex_lock(&priv->mutex); 1706 mutex_lock(&priv->mutex);
1687 spin_lock_bh(&priv->beacon_lock); 1707 spin_lock_bh(&priv->beacon_lock);
1688 clear_bit(OP_SCANNING, &priv->op_flags); 1708 clear_bit(ATH_OP_SCANNING, &common->op_flags);
1689 spin_unlock_bh(&priv->beacon_lock); 1709 spin_unlock_bh(&priv->beacon_lock);
1690 ath9k_htc_ps_wakeup(priv); 1710 ath9k_htc_ps_wakeup(priv);
1691 ath9k_htc_vif_reconfig(priv); 1711 ath9k_htc_vif_reconfig(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 12e0f32a4905..e8149e3dbdd5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -924,46 +924,43 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
924 924
925void ath9k_host_rx_init(struct ath9k_htc_priv *priv) 925void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
926{ 926{
927 struct ath_common *common = ath9k_hw_common(priv->ah);
927 ath9k_hw_rxena(priv->ah); 928 ath9k_hw_rxena(priv->ah);
928 ath9k_htc_opmode_init(priv); 929 ath9k_htc_opmode_init(priv);
929 ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags)); 930 ath9k_hw_startpcureceive(priv->ah, test_bit(ATH_OP_SCANNING, &common->op_flags));
930 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
931} 931}
932 932
933static void ath9k_process_rate(struct ieee80211_hw *hw, 933static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
934 struct ieee80211_rx_status *rxs, 934 struct ath_htc_rx_status *rxstatus)
935 u8 rx_rate, u8 rs_flags)
936{ 935{
937 struct ieee80211_supported_band *sband; 936 rx_stats->flag = 0;
938 enum ieee80211_band band; 937 if (rxstatus->rs_flags & ATH9K_RX_2040)
939 unsigned int i = 0; 938 rx_stats->flag |= RX_FLAG_40MHZ;
940 939 if (rxstatus->rs_flags & ATH9K_RX_GI)
941 if (rx_rate & 0x80) { 940 rx_stats->flag |= RX_FLAG_SHORT_GI;
942 /* HT rate */ 941}
943 rxs->flag |= RX_FLAG_HT;
944 if (rs_flags & ATH9K_RX_2040)
945 rxs->flag |= RX_FLAG_40MHZ;
946 if (rs_flags & ATH9K_RX_GI)
947 rxs->flag |= RX_FLAG_SHORT_GI;
948 rxs->rate_idx = rx_rate & 0x7f;
949 return;
950 }
951
952 band = hw->conf.chandef.chan->band;
953 sband = hw->wiphy->bands[band];
954
955 for (i = 0; i < sband->n_bitrates; i++) {
956 if (sband->bitrates[i].hw_value == rx_rate) {
957 rxs->rate_idx = i;
958 return;
959 }
960 if (sband->bitrates[i].hw_value_short == rx_rate) {
961 rxs->rate_idx = i;
962 rxs->flag |= RX_FLAG_SHORTPRE;
963 return;
964 }
965 }
966 942
943static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
944 struct ath_htc_rx_status *rxstatus)
945{
946 rx_stats->rs_datalen = rxstatus->rs_datalen;
947 rx_stats->rs_status = rxstatus->rs_status;
948 rx_stats->rs_phyerr = rxstatus->rs_phyerr;
949 rx_stats->rs_rssi = rxstatus->rs_rssi;
950 rx_stats->rs_keyix = rxstatus->rs_keyix;
951 rx_stats->rs_rate = rxstatus->rs_rate;
952 rx_stats->rs_antenna = rxstatus->rs_antenna;
953 rx_stats->rs_more = rxstatus->rs_more;
954
955 memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
956 sizeof(rx_stats->rs_rssi_ctl));
957 memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
958 sizeof(rx_stats->rs_rssi_ext));
959
960 rx_stats->rs_isaggr = rxstatus->rs_isaggr;
961 rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
962 rx_stats->rs_num_delims = rxstatus->rs_num_delims;
963 convert_htc_flag(rx_stats, rxstatus);
967} 964}
968 965
969static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, 966static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@@ -975,10 +972,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
975 struct ieee80211_hw *hw = priv->hw; 972 struct ieee80211_hw *hw = priv->hw;
976 struct sk_buff *skb = rxbuf->skb; 973 struct sk_buff *skb = rxbuf->skb;
977 struct ath_common *common = ath9k_hw_common(priv->ah); 974 struct ath_common *common = ath9k_hw_common(priv->ah);
975 struct ath_hw *ah = common->ah;
978 struct ath_htc_rx_status *rxstatus; 976 struct ath_htc_rx_status *rxstatus;
979 int hdrlen, padsize; 977 struct ath_rx_status rx_stats;
980 int last_rssi = ATH_RSSI_DUMMY_MARKER; 978 bool decrypt_error;
981 __le16 fc;
982 979
983 if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { 980 if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
984 ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", 981 ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -999,103 +996,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
999 ath9k_htc_err_stat_rx(priv, rxstatus); 996 ath9k_htc_err_stat_rx(priv, rxstatus);
1000 997
1001 /* Get the RX status information */ 998 /* Get the RX status information */
1002 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
1003 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
1004
1005 hdr = (struct ieee80211_hdr *)skb->data;
1006 fc = hdr->frame_control;
1007 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1008
1009 padsize = hdrlen & 3;
1010 if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
1011 memmove(skb->data + padsize, skb->data, hdrlen);
1012 skb_pull(skb, padsize);
1013 }
1014 999
1015 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 1000 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
1016 1001
1017 if (rxbuf->rxstatus.rs_status != 0) { 1002 /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
1018 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC) 1003 * After this, we can drop this part of skb. */
1019 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 1004 rx_status_htc_to_ath(&rx_stats, rxstatus);
1020 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY) 1005 rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
1021 goto rx_next; 1006 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
1022
1023 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
1024 /* FIXME */
1025 } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
1026 if (ieee80211_is_ctl(fc))
1027 /*
1028 * Sometimes, we get invalid
1029 * MIC failures on valid control frames.
1030 * Remove these mic errors.
1031 */
1032 rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
1033 else
1034 rx_status->flag |= RX_FLAG_MMIC_ERROR;
1035 }
1036
1037 /*
1038 * Reject error frames with the exception of
1039 * decryption and MIC failures. For monitor mode,
1040 * we also ignore the CRC error.
1041 */
1042 if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
1043 if (rxbuf->rxstatus.rs_status &
1044 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
1045 ATH9K_RXERR_CRC))
1046 goto rx_next;
1047 } else {
1048 if (rxbuf->rxstatus.rs_status &
1049 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
1050 goto rx_next;
1051 }
1052 }
1053 }
1054
1055 if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
1056 u8 keyix;
1057 keyix = rxbuf->rxstatus.rs_keyix;
1058 if (keyix != ATH9K_RXKEYIX_INVALID) {
1059 rx_status->flag |= RX_FLAG_DECRYPTED;
1060 } else if (ieee80211_has_protected(fc) &&
1061 skb->len >= hdrlen + 4) {
1062 keyix = skb->data[hdrlen + 3] >> 6;
1063 if (test_bit(keyix, common->keymap))
1064 rx_status->flag |= RX_FLAG_DECRYPTED;
1065 }
1066 }
1067
1068 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
1069 rxbuf->rxstatus.rs_flags);
1070
1071 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
1072 !rxbuf->rxstatus.rs_moreaggr)
1073 ATH_RSSI_LPF(priv->rx.last_rssi,
1074 rxbuf->rxstatus.rs_rssi);
1075
1076 last_rssi = priv->rx.last_rssi;
1077 1007
1078 if (ath_is_mybeacon(common, hdr)) { 1008 /*
1079 s8 rssi = rxbuf->rxstatus.rs_rssi; 1009 * everything but the rate is checked here, the rate check is done
1010 * separately to avoid doing two lookups for a rate for each frame.
1011 */
1012 hdr = (struct ieee80211_hdr *)skb->data;
1013 if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
1014 &decrypt_error, priv->rxfilter))
1015 goto rx_next;
1080 1016
1081 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 1017 ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
1082 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); 1018 rx_status, decrypt_error);
1083 1019
1084 if (rssi < 0) 1020 if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
1085 rssi = 0; 1021 goto rx_next;
1086 1022
1087 priv->ah->stats.avgbrssi = rssi; 1023 rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
1088 } 1024 ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
1089 1025
1090 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); 1026 rx_status->band = ah->curchan->chan->band;
1091 rx_status->band = hw->conf.chandef.chan->band; 1027 rx_status->freq = ah->curchan->chan->center_freq;
1092 rx_status->freq = hw->conf.chandef.chan->center_freq; 1028 rx_status->antenna = rx_stats.rs_antenna;
1093 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
1094 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
1095 rx_status->flag |= RX_FLAG_MACTIME_END; 1029 rx_status->flag |= RX_FLAG_MACTIME_END;
1096 1030
1097 return true; 1031 return true;
1098
1099rx_next: 1032rx_next:
1100 return false; 1033 return false;
1101} 1034}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index aac4a406a513..a0ff5b637054 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -358,6 +358,36 @@ ret:
358 kfree_skb(skb); 358 kfree_skb(skb);
359} 359}
360 360
361static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
362 struct sk_buff *skb)
363{
364 uint32_t *pattern = (uint32_t *)skb->data;
365
366 switch (*pattern) {
367 case 0x33221199:
368 {
369 struct htc_panic_bad_vaddr *htc_panic;
370 htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
371 dev_err(htc_handle->dev, "ath: firmware panic! "
372 "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
373 htc_panic->exccause, htc_panic->pc,
374 htc_panic->badvaddr);
375 break;
376 }
377 case 0x33221299:
378 {
379 struct htc_panic_bad_epid *htc_panic;
380 htc_panic = (struct htc_panic_bad_epid *) skb->data;
381 dev_err(htc_handle->dev, "ath: firmware panic! "
382 "bad epid: 0x%08x\n", htc_panic->epid);
383 break;
384 }
385 default:
386 dev_err(htc_handle->dev, "ath: uknown panic pattern!\n");
387 break;
388 }
389}
390
361/* 391/*
362 * HTC Messages are handled directly here and the obtained SKB 392 * HTC Messages are handled directly here and the obtained SKB
363 * is freed. 393 * is freed.
@@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
379 htc_hdr = (struct htc_frame_hdr *) skb->data; 409 htc_hdr = (struct htc_frame_hdr *) skb->data;
380 epid = htc_hdr->endpoint_id; 410 epid = htc_hdr->endpoint_id;
381 411
412 if (epid == 0x99) {
413 ath9k_htc_fw_panic_report(htc_handle, skb);
414 kfree_skb(skb);
415 return;
416 }
417
382 if (epid >= ENDPOINT_MAX) { 418 if (epid >= ENDPOINT_MAX) {
383 if (pipe_id != USB_REG_IN_PIPE) 419 if (pipe_id != USB_REG_IN_PIPE)
384 dev_kfree_skb_any(skb); 420 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index e1ffbb6bd636..06474ccc7696 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
77 u8 credits; 77 u8 credits;
78} __packed; 78} __packed;
79 79
80struct htc_panic_bad_vaddr {
81 __be32 pattern;
82 __be32 exccause;
83 __be32 pc;
84 __be32 badvaddr;
85} __packed;
86
87struct htc_panic_bad_epid {
88 __be32 pattern;
89 __be32 epid;
90} __packed;
91
80struct htc_ep_callbacks { 92struct htc_ep_callbacks {
81 void *priv; 93 void *priv;
82 void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok); 94 void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9078a6c5a74e..c8a9dfab1fee 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,7 +23,6 @@
23 23
24#include "hw.h" 24#include "hw.h"
25#include "hw-ops.h" 25#include "hw-ops.h"
26#include "rc.h"
27#include "ar9003_mac.h" 26#include "ar9003_mac.h"
28#include "ar9003_mci.h" 27#include "ar9003_mci.h"
29#include "ar9003_phy.h" 28#include "ar9003_phy.h"
@@ -883,7 +882,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
883 AR_IMR_RXORN | 882 AR_IMR_RXORN |
884 AR_IMR_BCNMISC; 883 AR_IMR_BCNMISC;
885 884
886 if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) 885 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
887 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 886 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
888 887
889 if (AR_SREV_9300_20_OR_LATER(ah)) { 888 if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -3048,6 +3047,7 @@ static struct {
3048 { AR_SREV_VERSION_9462, "9462" }, 3047 { AR_SREV_VERSION_9462, "9462" },
3049 { AR_SREV_VERSION_9550, "9550" }, 3048 { AR_SREV_VERSION_9550, "9550" },
3050 { AR_SREV_VERSION_9565, "9565" }, 3049 { AR_SREV_VERSION_9565, "9565" },
3050 { AR_SREV_VERSION_9531, "9531" },
3051}; 3051};
3052 3052
3053/* For devices with external radios */ 3053/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1fc2e5a26b52..c0a4e866edca 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -62,111 +62,6 @@ module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); 62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
63 63
64bool is_ath9k_unloaded; 64bool is_ath9k_unloaded;
65/* We use the hw_value as an index into our private channel structure */
66
67#define CHAN2G(_freq, _idx) { \
68 .band = IEEE80211_BAND_2GHZ, \
69 .center_freq = (_freq), \
70 .hw_value = (_idx), \
71 .max_power = 20, \
72}
73
74#define CHAN5G(_freq, _idx) { \
75 .band = IEEE80211_BAND_5GHZ, \
76 .center_freq = (_freq), \
77 .hw_value = (_idx), \
78 .max_power = 20, \
79}
80
81/* Some 2 GHz radios are actually tunable on 2312-2732
82 * on 5 MHz steps, we support the channels which we know
83 * we have calibration data for all cards though to make
84 * this static */
85static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
86 CHAN2G(2412, 0), /* Channel 1 */
87 CHAN2G(2417, 1), /* Channel 2 */
88 CHAN2G(2422, 2), /* Channel 3 */
89 CHAN2G(2427, 3), /* Channel 4 */
90 CHAN2G(2432, 4), /* Channel 5 */
91 CHAN2G(2437, 5), /* Channel 6 */
92 CHAN2G(2442, 6), /* Channel 7 */
93 CHAN2G(2447, 7), /* Channel 8 */
94 CHAN2G(2452, 8), /* Channel 9 */
95 CHAN2G(2457, 9), /* Channel 10 */
96 CHAN2G(2462, 10), /* Channel 11 */
97 CHAN2G(2467, 11), /* Channel 12 */
98 CHAN2G(2472, 12), /* Channel 13 */
99 CHAN2G(2484, 13), /* Channel 14 */
100};
101
102/* Some 5 GHz radios are actually tunable on XXXX-YYYY
103 * on 5 MHz steps, we support the channels which we know
104 * we have calibration data for all cards though to make
105 * this static */
106static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
107 /* _We_ call this UNII 1 */
108 CHAN5G(5180, 14), /* Channel 36 */
109 CHAN5G(5200, 15), /* Channel 40 */
110 CHAN5G(5220, 16), /* Channel 44 */
111 CHAN5G(5240, 17), /* Channel 48 */
112 /* _We_ call this UNII 2 */
113 CHAN5G(5260, 18), /* Channel 52 */
114 CHAN5G(5280, 19), /* Channel 56 */
115 CHAN5G(5300, 20), /* Channel 60 */
116 CHAN5G(5320, 21), /* Channel 64 */
117 /* _We_ call this "Middle band" */
118 CHAN5G(5500, 22), /* Channel 100 */
119 CHAN5G(5520, 23), /* Channel 104 */
120 CHAN5G(5540, 24), /* Channel 108 */
121 CHAN5G(5560, 25), /* Channel 112 */
122 CHAN5G(5580, 26), /* Channel 116 */
123 CHAN5G(5600, 27), /* Channel 120 */
124 CHAN5G(5620, 28), /* Channel 124 */
125 CHAN5G(5640, 29), /* Channel 128 */
126 CHAN5G(5660, 30), /* Channel 132 */
127 CHAN5G(5680, 31), /* Channel 136 */
128 CHAN5G(5700, 32), /* Channel 140 */
129 /* _We_ call this UNII 3 */
130 CHAN5G(5745, 33), /* Channel 149 */
131 CHAN5G(5765, 34), /* Channel 153 */
132 CHAN5G(5785, 35), /* Channel 157 */
133 CHAN5G(5805, 36), /* Channel 161 */
134 CHAN5G(5825, 37), /* Channel 165 */
135};
136
137/* Atheros hardware rate code addition for short premble */
138#define SHPCHECK(__hw_rate, __flags) \
139 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
140
141#define RATE(_bitrate, _hw_rate, _flags) { \
142 .bitrate = (_bitrate), \
143 .flags = (_flags), \
144 .hw_value = (_hw_rate), \
145 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
146}
147
148static struct ieee80211_rate ath9k_legacy_rates[] = {
149 RATE(10, 0x1b, 0),
150 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
151 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
152 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
153 RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
154 IEEE80211_RATE_SUPPORTS_10MHZ)),
155 RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
156 IEEE80211_RATE_SUPPORTS_10MHZ)),
157 RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
158 IEEE80211_RATE_SUPPORTS_10MHZ)),
159 RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
160 IEEE80211_RATE_SUPPORTS_10MHZ)),
161 RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
162 IEEE80211_RATE_SUPPORTS_10MHZ)),
163 RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
164 IEEE80211_RATE_SUPPORTS_10MHZ)),
165 RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
166 IEEE80211_RATE_SUPPORTS_10MHZ)),
167 RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
168 IEEE80211_RATE_SUPPORTS_10MHZ)),
169};
170 65
171#ifdef CONFIG_MAC80211_LEDS 66#ifdef CONFIG_MAC80211_LEDS
172static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = { 67static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
@@ -258,64 +153,6 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
258/* Initialization */ 153/* Initialization */
259/**************************/ 154/**************************/
260 155
261static void setup_ht_cap(struct ath_softc *sc,
262 struct ieee80211_sta_ht_cap *ht_info)
263{
264 struct ath_hw *ah = sc->sc_ah;
265 struct ath_common *common = ath9k_hw_common(ah);
266 u8 tx_streams, rx_streams;
267 int i, max_streams;
268
269 ht_info->ht_supported = true;
270 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
271 IEEE80211_HT_CAP_SM_PS |
272 IEEE80211_HT_CAP_SGI_40 |
273 IEEE80211_HT_CAP_DSSSCCK40;
274
275 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
276 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
277
278 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
279 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
280
281 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
282 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
283
284 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
285 max_streams = 1;
286 else if (AR_SREV_9462(ah))
287 max_streams = 2;
288 else if (AR_SREV_9300_20_OR_LATER(ah))
289 max_streams = 3;
290 else
291 max_streams = 2;
292
293 if (AR_SREV_9280_20_OR_LATER(ah)) {
294 if (max_streams >= 2)
295 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
296 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
297 }
298
299 /* set up supported mcs set */
300 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
301 tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
302 rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
303
304 ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
305 tx_streams, rx_streams);
306
307 if (tx_streams != rx_streams) {
308 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
309 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
310 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
311 }
312
313 for (i = 0; i < rx_streams; i++)
314 ht_info->mcs.rx_mask[i] = 0xff;
315
316 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
317}
318
319static void ath9k_reg_notifier(struct wiphy *wiphy, 156static void ath9k_reg_notifier(struct wiphy *wiphy,
320 struct regulatory_request *request) 157 struct regulatory_request *request)
321{ 158{
@@ -486,51 +323,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
486 return 0; 323 return 0;
487} 324}
488 325
489static int ath9k_init_channels_rates(struct ath_softc *sc)
490{
491 void *channels;
492
493 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
494 ARRAY_SIZE(ath9k_5ghz_chantable) !=
495 ATH9K_NUM_CHANNELS);
496
497 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
498 channels = devm_kzalloc(sc->dev,
499 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
500 if (!channels)
501 return -ENOMEM;
502
503 memcpy(channels, ath9k_2ghz_chantable,
504 sizeof(ath9k_2ghz_chantable));
505 sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
506 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
507 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
508 ARRAY_SIZE(ath9k_2ghz_chantable);
509 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
510 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
511 ARRAY_SIZE(ath9k_legacy_rates);
512 }
513
514 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
515 channels = devm_kzalloc(sc->dev,
516 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
517 if (!channels)
518 return -ENOMEM;
519
520 memcpy(channels, ath9k_5ghz_chantable,
521 sizeof(ath9k_5ghz_chantable));
522 sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
523 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
524 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
525 ARRAY_SIZE(ath9k_5ghz_chantable);
526 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
527 ath9k_legacy_rates + 4;
528 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
529 ARRAY_SIZE(ath9k_legacy_rates) - 4;
530 }
531 return 0;
532}
533
534static void ath9k_init_misc(struct ath_softc *sc) 326static void ath9k_init_misc(struct ath_softc *sc)
535{ 327{
536 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 328 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -538,7 +330,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
538 330
539 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc); 331 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
540 332
541 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 333 common->last_rssi = ATH_RSSI_DUMMY_MARKER;
542 sc->config.txpowlimit = ATH_TXPOWER_MAX; 334 sc->config.txpowlimit = ATH_TXPOWER_MAX;
543 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); 335 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
544 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 336 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -793,7 +585,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
793 if (ret) 585 if (ret)
794 goto err_btcoex; 586 goto err_btcoex;
795 587
796 ret = ath9k_init_channels_rates(sc); 588 ret = ath9k_cmn_init_channels_rates(common);
797 if (ret) 589 if (ret)
798 goto err_btcoex; 590 goto err_btcoex;
799 591
@@ -823,10 +615,11 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
823 struct ieee80211_supported_band *sband; 615 struct ieee80211_supported_band *sband;
824 struct ieee80211_channel *chan; 616 struct ieee80211_channel *chan;
825 struct ath_hw *ah = sc->sc_ah; 617 struct ath_hw *ah = sc->sc_ah;
618 struct ath_common *common = ath9k_hw_common(ah);
826 struct cfg80211_chan_def chandef; 619 struct cfg80211_chan_def chandef;
827 int i; 620 int i;
828 621
829 sband = &sc->sbands[band]; 622 sband = &common->sbands[band];
830 for (i = 0; i < sband->n_channels; i++) { 623 for (i = 0; i < sband->n_channels; i++) {
831 chan = &sband->channels[i]; 624 chan = &sband->channels[i];
832 ah->curchan = &ah->channels[chan->hw_value]; 625 ah->curchan = &ah->channels[chan->hw_value];
@@ -849,17 +642,6 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
849 ah->curchan = curchan; 642 ah->curchan = curchan;
850} 643}
851 644
852void ath9k_reload_chainmask_settings(struct ath_softc *sc)
853{
854 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
855 return;
856
857 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
858 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
859 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
860 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
861}
862
863static const struct ieee80211_iface_limit if_limits[] = { 645static const struct ieee80211_iface_limit if_limits[] = {
864 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | 646 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
865 BIT(NL80211_IFTYPE_P2P_CLIENT) | 647 BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -949,6 +731,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
949 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 731 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
950 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ; 732 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
951 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 733 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
734 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
952 735
953 hw->queues = 4; 736 hw->queues = 4;
954 hw->max_rates = 4; 737 hw->max_rates = 4;
@@ -969,13 +752,13 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
969 752
970 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 753 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
971 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 754 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
972 &sc->sbands[IEEE80211_BAND_2GHZ]; 755 &common->sbands[IEEE80211_BAND_2GHZ];
973 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 756 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
974 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 757 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
975 &sc->sbands[IEEE80211_BAND_5GHZ]; 758 &common->sbands[IEEE80211_BAND_5GHZ];
976 759
977 ath9k_init_wow(hw); 760 ath9k_init_wow(hw);
978 ath9k_reload_chainmask_settings(sc); 761 ath9k_cmn_reload_chainmask(ah);
979 762
980 SET_IEEE80211_PERM_ADDR(hw, common->macaddr); 763 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
981} 764}
@@ -1106,19 +889,11 @@ static int __init ath9k_init(void)
1106{ 889{
1107 int error; 890 int error;
1108 891
1109 /* Register rate control algorithm */
1110 error = ath_rate_control_register();
1111 if (error != 0) {
1112 pr_err("Unable to register rate control algorithm: %d\n",
1113 error);
1114 goto err_out;
1115 }
1116
1117 error = ath_pci_init(); 892 error = ath_pci_init();
1118 if (error < 0) { 893 if (error < 0) {
1119 pr_err("No PCI devices found, driver not installed\n"); 894 pr_err("No PCI devices found, driver not installed\n");
1120 error = -ENODEV; 895 error = -ENODEV;
1121 goto err_rate_unregister; 896 goto err_out;
1122 } 897 }
1123 898
1124 error = ath_ahb_init(); 899 error = ath_ahb_init();
@@ -1131,9 +906,6 @@ static int __init ath9k_init(void)
1131 906
1132 err_pci_exit: 907 err_pci_exit:
1133 ath_pci_exit(); 908 ath_pci_exit();
1134
1135 err_rate_unregister:
1136 ath_rate_control_unregister();
1137 err_out: 909 err_out:
1138 return error; 910 return error;
1139} 911}
@@ -1144,7 +916,6 @@ static void __exit ath9k_exit(void)
1144 is_ath9k_unloaded = true; 916 is_ath9k_unloaded = true;
1145 ath_ahb_exit(); 917 ath_ahb_exit();
1146 ath_pci_exit(); 918 ath_pci_exit();
1147 ath_rate_control_unregister();
1148 pr_info("%s: Driver unloaded\n", dev_info); 919 pr_info("%s: Driver unloaded\n", dev_info);
1149} 920}
1150module_exit(ath9k_exit); 921module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 30dcef5aba10..72a715fe8f24 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -115,13 +115,14 @@ void ath_hw_pll_work(struct work_struct *work)
115 u32 pll_sqsum; 115 u32 pll_sqsum;
116 struct ath_softc *sc = container_of(work, struct ath_softc, 116 struct ath_softc *sc = container_of(work, struct ath_softc,
117 hw_pll_work.work); 117 hw_pll_work.work);
118 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
118 /* 119 /*
119 * ensure that the PLL WAR is executed only 120 * ensure that the PLL WAR is executed only
120 * after the STA is associated (or) if the 121 * after the STA is associated (or) if the
121 * beaconing had started in interfaces that 122 * beaconing had started in interfaces that
122 * uses beacons. 123 * uses beacons.
123 */ 124 */
124 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) 125 if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
125 return; 126 return;
126 127
127 if (sc->tx99_state) 128 if (sc->tx99_state)
@@ -414,7 +415,7 @@ void ath_start_ani(struct ath_softc *sc)
414 unsigned long timestamp = jiffies_to_msecs(jiffies); 415 unsigned long timestamp = jiffies_to_msecs(jiffies);
415 416
416 if (common->disable_ani || 417 if (common->disable_ani ||
417 !test_bit(SC_OP_ANI_RUN, &sc->sc_flags) || 418 !test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
418 (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) 419 (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
419 return; 420 return;
420 421
@@ -438,6 +439,7 @@ void ath_stop_ani(struct ath_softc *sc)
438void ath_check_ani(struct ath_softc *sc) 439void ath_check_ani(struct ath_softc *sc)
439{ 440{
440 struct ath_hw *ah = sc->sc_ah; 441 struct ath_hw *ah = sc->sc_ah;
442 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
441 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 443 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
442 444
443 /* 445 /*
@@ -453,23 +455,23 @@ void ath_check_ani(struct ath_softc *sc)
453 * Disable ANI only when there are no 455 * Disable ANI only when there are no
454 * associated stations. 456 * associated stations.
455 */ 457 */
456 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) 458 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
457 goto stop_ani; 459 goto stop_ani;
458 } 460 }
459 } else if (ah->opmode == NL80211_IFTYPE_STATION) { 461 } else if (ah->opmode == NL80211_IFTYPE_STATION) {
460 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) 462 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
461 goto stop_ani; 463 goto stop_ani;
462 } 464 }
463 465
464 if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) { 466 if (!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
465 set_bit(SC_OP_ANI_RUN, &sc->sc_flags); 467 set_bit(ATH_OP_ANI_RUN, &common->op_flags);
466 ath_start_ani(sc); 468 ath_start_ani(sc);
467 } 469 }
468 470
469 return; 471 return;
470 472
471stop_ani: 473stop_ani:
472 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags); 474 clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
473 ath_stop_ani(sc); 475 ath_stop_ani(sc);
474} 476}
475 477
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 5f727588ca27..51ce36f108f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -827,7 +827,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
827 return; 827 return;
828 } 828 }
829 829
830 if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) 830 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
831 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 831 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
832 832
833 async_mask = AR_INTR_MAC_IRQ; 833 async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 10271373a0cd..89df634e81f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -155,12 +155,8 @@ struct ath_htc_rx_status {
155 u8 rs_status; 155 u8 rs_status;
156 u8 rs_phyerr; 156 u8 rs_phyerr;
157 int8_t rs_rssi; 157 int8_t rs_rssi;
158 int8_t rs_rssi_ctl0; 158 int8_t rs_rssi_ctl[3];
159 int8_t rs_rssi_ctl1; 159 int8_t rs_rssi_ext[3];
160 int8_t rs_rssi_ctl2;
161 int8_t rs_rssi_ext0;
162 int8_t rs_rssi_ext1;
163 int8_t rs_rssi_ext2;
164 u8 rs_keyix; 160 u8 rs_keyix;
165 u8 rs_rate; 161 u8 rs_rate;
166 u8 rs_antenna; 162 u8 rs_antenna;
@@ -170,6 +166,7 @@ struct ath_htc_rx_status {
170 u8 rs_num_delims; 166 u8 rs_num_delims;
171 u8 rs_flags; 167 u8 rs_flags;
172 u8 rs_dummy; 168 u8 rs_dummy;
169 /* FIXME: evm* never used? */
173 __be32 evm0; 170 __be32 evm0;
174 __be32 evm1; 171 __be32 evm1;
175 __be32 evm2; 172 __be32 evm2;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5924f72dd493..d69853b848ce 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -229,16 +229,16 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
229 ath9k_cmn_update_txpow(ah, sc->curtxpow, 229 ath9k_cmn_update_txpow(ah, sc->curtxpow,
230 sc->config.txpowlimit, &sc->curtxpow); 230 sc->config.txpowlimit, &sc->curtxpow);
231 231
232 clear_bit(SC_OP_HW_RESET, &sc->sc_flags); 232 clear_bit(ATH_OP_HW_RESET, &common->op_flags);
233 ath9k_hw_set_interrupts(ah); 233 ath9k_hw_set_interrupts(ah);
234 ath9k_hw_enable_interrupts(ah); 234 ath9k_hw_enable_interrupts(ah);
235 235
236 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) { 236 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
237 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) 237 if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
238 goto work; 238 goto work;
239 239
240 if (ah->opmode == NL80211_IFTYPE_STATION && 240 if (ah->opmode == NL80211_IFTYPE_STATION &&
241 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { 241 test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
242 spin_lock_irqsave(&sc->sc_pm_lock, flags); 242 spin_lock_irqsave(&sc->sc_pm_lock, flags);
243 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 243 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
244 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 244 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -336,7 +336,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
336 int old_pos = -1; 336 int old_pos = -1;
337 int r; 337 int r;
338 338
339 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 339 if (test_bit(ATH_OP_INVALID, &common->op_flags))
340 return -EIO; 340 return -EIO;
341 341
342 offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL); 342 offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -402,7 +402,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
402 chan->center_freq); 402 chan->center_freq);
403 } else { 403 } else {
404 /* perform spectral scan if requested. */ 404 /* perform spectral scan if requested. */
405 if (test_bit(SC_OP_SCANNING, &sc->sc_flags) && 405 if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
406 sc->spectral_mode == SPECTRAL_CHANSCAN) 406 sc->spectral_mode == SPECTRAL_CHANSCAN)
407 ath9k_spectral_scan_trigger(hw); 407 ath9k_spectral_scan_trigger(hw);
408 } 408 }
@@ -451,7 +451,7 @@ void ath9k_tasklet(unsigned long data)
451 * interrupts are enabled in the reset routine. 451 * interrupts are enabled in the reset routine.
452 */ 452 */
453 atomic_inc(&ah->intr_ref_cnt); 453 atomic_inc(&ah->intr_ref_cnt);
454 ath_dbg(common, ANY, "FATAL: Skipping interrupts\n"); 454 ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
455 goto out; 455 goto out;
456 } 456 }
457 457
@@ -471,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
471 * interrupts are enabled in the reset routine. 471 * interrupts are enabled in the reset routine.
472 */ 472 */
473 atomic_inc(&ah->intr_ref_cnt); 473 atomic_inc(&ah->intr_ref_cnt);
474 ath_dbg(common, ANY, 474 ath_dbg(common, RESET,
475 "BB_WATCHDOG: Skipping interrupts\n"); 475 "BB_WATCHDOG: Skipping interrupts\n");
476 goto out; 476 goto out;
477 } 477 }
@@ -484,7 +484,7 @@ void ath9k_tasklet(unsigned long data)
484 type = RESET_TYPE_TX_GTT; 484 type = RESET_TYPE_TX_GTT;
485 ath9k_queue_reset(sc, type); 485 ath9k_queue_reset(sc, type);
486 atomic_inc(&ah->intr_ref_cnt); 486 atomic_inc(&ah->intr_ref_cnt);
487 ath_dbg(common, ANY, 487 ath_dbg(common, RESET,
488 "GTT: Skipping interrupts\n"); 488 "GTT: Skipping interrupts\n");
489 goto out; 489 goto out;
490 } 490 }
@@ -566,6 +566,7 @@ irqreturn_t ath_isr(int irq, void *dev)
566 566
567 struct ath_softc *sc = dev; 567 struct ath_softc *sc = dev;
568 struct ath_hw *ah = sc->sc_ah; 568 struct ath_hw *ah = sc->sc_ah;
569 struct ath_common *common = ath9k_hw_common(ah);
569 enum ath9k_int status; 570 enum ath9k_int status;
570 u32 sync_cause = 0; 571 u32 sync_cause = 0;
571 bool sched = false; 572 bool sched = false;
@@ -575,7 +576,7 @@ irqreturn_t ath_isr(int irq, void *dev)
575 * touch anything. Note this can happen early 576 * touch anything. Note this can happen early
576 * on if the IRQ is shared. 577 * on if the IRQ is shared.
577 */ 578 */
578 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 579 if (test_bit(ATH_OP_INVALID, &common->op_flags))
579 return IRQ_NONE; 580 return IRQ_NONE;
580 581
581 /* shared irq, not for us */ 582 /* shared irq, not for us */
@@ -583,7 +584,7 @@ irqreturn_t ath_isr(int irq, void *dev)
583 if (!ath9k_hw_intrpend(ah)) 584 if (!ath9k_hw_intrpend(ah))
584 return IRQ_NONE; 585 return IRQ_NONE;
585 586
586 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) { 587 if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
587 ath9k_hw_kill_interrupts(ah); 588 ath9k_hw_kill_interrupts(ah);
588 return IRQ_HANDLED; 589 return IRQ_HANDLED;
589 } 590 }
@@ -684,10 +685,11 @@ int ath_reset(struct ath_softc *sc)
684 685
685void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type) 686void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
686{ 687{
688 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
687#ifdef CONFIG_ATH9K_DEBUGFS 689#ifdef CONFIG_ATH9K_DEBUGFS
688 RESET_STAT_INC(sc, type); 690 RESET_STAT_INC(sc, type);
689#endif 691#endif
690 set_bit(SC_OP_HW_RESET, &sc->sc_flags); 692 set_bit(ATH_OP_HW_RESET, &common->op_flags);
691 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 693 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
692} 694}
693 695
@@ -768,7 +770,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
768 770
769 ath_mci_enable(sc); 771 ath_mci_enable(sc);
770 772
771 clear_bit(SC_OP_INVALID, &sc->sc_flags); 773 clear_bit(ATH_OP_INVALID, &common->op_flags);
772 sc->sc_ah->is_monitoring = false; 774 sc->sc_ah->is_monitoring = false;
773 775
774 if (!ath_complete_reset(sc, false)) 776 if (!ath_complete_reset(sc, false))
@@ -885,7 +887,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
885 887
886 ath_cancel_work(sc); 888 ath_cancel_work(sc);
887 889
888 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) { 890 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
889 ath_dbg(common, ANY, "Device not present\n"); 891 ath_dbg(common, ANY, "Device not present\n");
890 mutex_unlock(&sc->mutex); 892 mutex_unlock(&sc->mutex);
891 return; 893 return;
@@ -940,7 +942,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
940 942
941 ath9k_ps_restore(sc); 943 ath9k_ps_restore(sc);
942 944
943 set_bit(SC_OP_INVALID, &sc->sc_flags); 945 set_bit(ATH_OP_INVALID, &common->op_flags);
944 sc->ps_idle = prev_idle; 946 sc->ps_idle = prev_idle;
945 947
946 mutex_unlock(&sc->mutex); 948 mutex_unlock(&sc->mutex);
@@ -1081,7 +1083,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1081 */ 1083 */
1082 if (ah->opmode == NL80211_IFTYPE_STATION && 1084 if (ah->opmode == NL80211_IFTYPE_STATION &&
1083 old_opmode == NL80211_IFTYPE_AP && 1085 old_opmode == NL80211_IFTYPE_AP &&
1084 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { 1086 test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
1085 ieee80211_iterate_active_interfaces_atomic( 1087 ieee80211_iterate_active_interfaces_atomic(
1086 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 1088 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1087 ath9k_sta_vif_iter, sc); 1089 ath9k_sta_vif_iter, sc);
@@ -1178,9 +1180,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1178 if (ath9k_uses_beacons(vif->type)) 1180 if (ath9k_uses_beacons(vif->type))
1179 ath9k_beacon_remove_slot(sc, vif); 1181 ath9k_beacon_remove_slot(sc, vif);
1180 1182
1181 if (sc->csa_vif == vif)
1182 sc->csa_vif = NULL;
1183
1184 ath9k_ps_wakeup(sc); 1183 ath9k_ps_wakeup(sc);
1185 ath9k_calculate_summary_state(hw, NULL); 1184 ath9k_calculate_summary_state(hw, NULL);
1186 ath9k_ps_restore(sc); 1185 ath9k_ps_restore(sc);
@@ -1593,7 +1592,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
1593 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 1592 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1594 unsigned long flags; 1593 unsigned long flags;
1595 1594
1596 set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags); 1595 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1597 avp->primary_sta_vif = true; 1596 avp->primary_sta_vif = true;
1598 1597
1599 /* 1598 /*
@@ -1609,7 +1608,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
1609 common->curaid = bss_conf->aid; 1608 common->curaid = bss_conf->aid;
1610 ath9k_hw_write_associd(sc->sc_ah); 1609 ath9k_hw_write_associd(sc->sc_ah);
1611 1610
1612 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 1611 common->last_rssi = ATH_RSSI_DUMMY_MARKER;
1613 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1612 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1614 1613
1615 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1614 spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -1628,8 +1627,9 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1628{ 1627{
1629 struct ath_softc *sc = data; 1628 struct ath_softc *sc = data;
1630 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 1629 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1630 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1631 1631
1632 if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) 1632 if (test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
1633 return; 1633 return;
1634 1634
1635 if (bss_conf->assoc) 1635 if (bss_conf->assoc)
@@ -1660,18 +1660,18 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1660 bss_conf->bssid, bss_conf->assoc); 1660 bss_conf->bssid, bss_conf->assoc);
1661 1661
1662 if (avp->primary_sta_vif && !bss_conf->assoc) { 1662 if (avp->primary_sta_vif && !bss_conf->assoc) {
1663 clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags); 1663 clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
1664 avp->primary_sta_vif = false; 1664 avp->primary_sta_vif = false;
1665 1665
1666 if (ah->opmode == NL80211_IFTYPE_STATION) 1666 if (ah->opmode == NL80211_IFTYPE_STATION)
1667 clear_bit(SC_OP_BEACONS, &sc->sc_flags); 1667 clear_bit(ATH_OP_BEACONS, &common->op_flags);
1668 } 1668 }
1669 1669
1670 ieee80211_iterate_active_interfaces_atomic( 1670 ieee80211_iterate_active_interfaces_atomic(
1671 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 1671 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1672 ath9k_bss_assoc_iter, sc); 1672 ath9k_bss_assoc_iter, sc);
1673 1673
1674 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) && 1674 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags) &&
1675 ah->opmode == NL80211_IFTYPE_STATION) { 1675 ah->opmode == NL80211_IFTYPE_STATION) {
1676 memset(common->curbssid, 0, ETH_ALEN); 1676 memset(common->curbssid, 0, ETH_ALEN);
1677 common->curaid = 0; 1677 common->curaid = 0;
@@ -1866,7 +1866,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
1866 1866
1867static bool ath9k_has_tx_pending(struct ath_softc *sc) 1867static bool ath9k_has_tx_pending(struct ath_softc *sc)
1868{ 1868{
1869 int i, npend; 1869 int i, npend = 0;
1870 1870
1871 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1871 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1872 if (!ATH_TXQ_SETUP(sc, i)) 1872 if (!ATH_TXQ_SETUP(sc, i))
@@ -1900,7 +1900,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1900 return; 1900 return;
1901 } 1901 }
1902 1902
1903 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) { 1903 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
1904 ath_dbg(common, ANY, "Device not present\n"); 1904 ath_dbg(common, ANY, "Device not present\n");
1905 mutex_unlock(&sc->mutex); 1905 mutex_unlock(&sc->mutex);
1906 return; 1906 return;
@@ -2056,7 +2056,7 @@ static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
2056 ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant); 2056 ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
2057 2057
2058 ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant); 2058 ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
2059 ath9k_reload_chainmask_settings(sc); 2059 ath9k_cmn_reload_chainmask(ah);
2060 2060
2061 return 0; 2061 return 0;
2062} 2062}
@@ -2073,26 +2073,23 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2073static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2073static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2074{ 2074{
2075 struct ath_softc *sc = hw->priv; 2075 struct ath_softc *sc = hw->priv;
2076 set_bit(SC_OP_SCANNING, &sc->sc_flags); 2076 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2077 set_bit(ATH_OP_SCANNING, &common->op_flags);
2077} 2078}
2078 2079
2079static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) 2080static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2080{ 2081{
2081 struct ath_softc *sc = hw->priv; 2082 struct ath_softc *sc = hw->priv;
2082 clear_bit(SC_OP_SCANNING, &sc->sc_flags); 2083 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2084 clear_bit(ATH_OP_SCANNING, &common->op_flags);
2083} 2085}
2084 2086
2085static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw, 2087static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
2086 struct ieee80211_vif *vif, 2088 struct ieee80211_vif *vif,
2087 struct cfg80211_chan_def *chandef) 2089 struct cfg80211_chan_def *chandef)
2088{ 2090{
2089 struct ath_softc *sc = hw->priv; 2091 /* depend on vif->csa_active only */
2090 2092 return;
2091 /* mac80211 does not support CSA in multi-if cases (yet) */
2092 if (WARN_ON(sc->csa_vif))
2093 return;
2094
2095 sc->csa_vif = vif;
2096} 2093}
2097 2094
2098struct ieee80211_ops ath9k_ops = { 2095struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 71799fcade54..a0dbcc412384 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -555,7 +555,7 @@ void ath_mci_intr(struct ath_softc *sc)
555 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM; 555 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
556 556
557 while (more_data == MCI_GPM_MORE) { 557 while (more_data == MCI_GPM_MORE) {
558 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 558 if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
559 return; 559 return;
560 560
561 pgpm = mci->gpm_buf.bf_addr; 561 pgpm = mci->gpm_buf.bf_addr;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 55724b02316b..25304adece57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,6 +784,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
784{ 784{
785 struct ath_softc *sc; 785 struct ath_softc *sc;
786 struct ieee80211_hw *hw; 786 struct ieee80211_hw *hw;
787 struct ath_common *common;
787 u8 csz; 788 u8 csz;
788 u32 val; 789 u32 val;
789 int ret = 0; 790 int ret = 0;
@@ -858,9 +859,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
858 sc->mem = pcim_iomap_table(pdev)[0]; 859 sc->mem = pcim_iomap_table(pdev)[0];
859 sc->driver_data = id->driver_data; 860 sc->driver_data = id->driver_data;
860 861
861 /* Will be cleared in ath9k_start() */
862 set_bit(SC_OP_INVALID, &sc->sc_flags);
863
864 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); 862 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
865 if (ret) { 863 if (ret) {
866 dev_err(&pdev->dev, "request_irq failed\n"); 864 dev_err(&pdev->dev, "request_irq failed\n");
@@ -879,6 +877,10 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
879 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 877 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
880 hw_name, (unsigned long)sc->mem, pdev->irq); 878 hw_name, (unsigned long)sc->mem, pdev->irq);
881 879
880 /* Will be cleared in ath9k_start() */
881 common = ath9k_hw_common(sc->sc_ah);
882 set_bit(ATH_OP_INVALID, &common->op_flags);
883
882 return 0; 884 return 0;
883 885
884err_init: 886err_init:
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
deleted file mode 100644
index d829bb62a3fc..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ /dev/null
@@ -1,1495 +0,0 @@
1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2011 Atheros Communications, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/slab.h>
19#include <linux/export.h>
20
21#include "ath9k.h"
22
23static const struct ath_rate_table ar5416_11na_ratetable = {
24 68,
25 8, /* MCS start */
26 {
27 [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
28 5400, 0, 12 }, /* 6 Mb */
29 [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
30 7800, 1, 18 }, /* 9 Mb */
31 [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
32 10000, 2, 24 }, /* 12 Mb */
33 [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
34 13900, 3, 36 }, /* 18 Mb */
35 [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
36 17300, 4, 48 }, /* 24 Mb */
37 [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
38 23000, 5, 72 }, /* 36 Mb */
39 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
40 27400, 6, 96 }, /* 48 Mb */
41 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
42 29300, 7, 108 }, /* 54 Mb */
43 [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
44 6400, 0, 0 }, /* 6.5 Mb */
45 [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
46 12700, 1, 1 }, /* 13 Mb */
47 [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
48 18800, 2, 2 }, /* 19.5 Mb */
49 [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
50 25000, 3, 3 }, /* 26 Mb */
51 [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
52 36700, 4, 4 }, /* 39 Mb */
53 [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
54 48100, 5, 5 }, /* 52 Mb */
55 [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
56 53500, 6, 6 }, /* 58.5 Mb */
57 [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
58 59000, 7, 7 }, /* 65 Mb */
59 [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
60 65400, 7, 7 }, /* 75 Mb */
61 [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
62 12700, 8, 8 }, /* 13 Mb */
63 [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
64 24800, 9, 9 }, /* 26 Mb */
65 [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
66 36600, 10, 10 }, /* 39 Mb */
67 [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
68 48100, 11, 11 }, /* 52 Mb */
69 [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
70 69500, 12, 12 }, /* 78 Mb */
71 [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
72 89500, 13, 13 }, /* 104 Mb */
73 [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
74 98900, 14, 14 }, /* 117 Mb */
75 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
76 108300, 15, 15 }, /* 130 Mb */
77 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
78 120000, 15, 15 }, /* 144.4 Mb */
79 [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
80 17400, 16, 16 }, /* 19.5 Mb */
81 [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
82 35100, 17, 17 }, /* 39 Mb */
83 [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
84 52600, 18, 18 }, /* 58.5 Mb */
85 [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
86 70400, 19, 19 }, /* 78 Mb */
87 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
88 104900, 20, 20 }, /* 117 Mb */
89 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
90 115800, 20, 20 }, /* 130 Mb*/
91 [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
92 137200, 21, 21 }, /* 156 Mb */
93 [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
94 151100, 21, 21 }, /* 173.3 Mb */
95 [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
96 152800, 22, 22 }, /* 175.5 Mb */
97 [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
98 168400, 22, 22 }, /* 195 Mb*/
99 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
100 168400, 23, 23 }, /* 195 Mb */
101 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
102 185000, 23, 23 }, /* 216.7 Mb */
103 [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
104 13200, 0, 0 }, /* 13.5 Mb*/
105 [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
106 25900, 1, 1 }, /* 27.0 Mb*/
107 [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
108 38600, 2, 2 }, /* 40.5 Mb*/
109 [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
110 49800, 3, 3 }, /* 54 Mb */
111 [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
112 72200, 4, 4 }, /* 81 Mb */
113 [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
114 92900, 5, 5 }, /* 108 Mb */
115 [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
116 102700, 6, 6 }, /* 121.5 Mb*/
117 [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
118 112000, 7, 7 }, /* 135 Mb */
119 [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
120 122000, 7, 7 }, /* 150 Mb */
121 [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
122 25800, 8, 8 }, /* 27 Mb */
123 [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
124 49800, 9, 9 }, /* 54 Mb */
125 [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
126 71900, 10, 10 }, /* 81 Mb */
127 [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
128 92500, 11, 11 }, /* 108 Mb */
129 [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
130 130300, 12, 12 }, /* 162 Mb */
131 [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
132 162800, 13, 13 }, /* 216 Mb */
133 [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
134 178200, 14, 14 }, /* 243 Mb */
135 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
136 192100, 15, 15 }, /* 270 Mb */
137 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
138 207000, 15, 15 }, /* 300 Mb */
139 [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
140 36100, 16, 16 }, /* 40.5 Mb */
141 [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
142 72900, 17, 17 }, /* 81 Mb */
143 [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
144 108300, 18, 18 }, /* 121.5 Mb */
145 [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
146 142000, 19, 19 }, /* 162 Mb */
147 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
148 205100, 20, 20 }, /* 243 Mb */
149 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
150 224700, 20, 20 }, /* 270 Mb */
151 [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
152 263100, 21, 21 }, /* 324 Mb */
153 [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
154 288000, 21, 21 }, /* 360 Mb */
155 [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
156 290700, 22, 22 }, /* 364.5 Mb */
157 [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
158 317200, 22, 22 }, /* 405 Mb */
159 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
160 317200, 23, 23 }, /* 405 Mb */
161 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
162 346400, 23, 23 }, /* 450 Mb */
163 },
164 50, /* probe interval */
165 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
166};
167
168/* 4ms frame limit not used for NG mode. The values filled
169 * for HT are the 64K max aggregate limit */
170
171static const struct ath_rate_table ar5416_11ng_ratetable = {
172 72,
173 12, /* MCS start */
174 {
175 [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
176 900, 0, 2 }, /* 1 Mb */
177 [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
178 1900, 1, 4 }, /* 2 Mb */
179 [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
180 4900, 2, 11 }, /* 5.5 Mb */
181 [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
182 8100, 3, 22 }, /* 11 Mb */
183 [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
184 5400, 4, 12 }, /* 6 Mb */
185 [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
186 7800, 5, 18 }, /* 9 Mb */
187 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
188 10100, 6, 24 }, /* 12 Mb */
189 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
190 14100, 7, 36 }, /* 18 Mb */
191 [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
192 17700, 8, 48 }, /* 24 Mb */
193 [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
194 23700, 9, 72 }, /* 36 Mb */
195 [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
196 27400, 10, 96 }, /* 48 Mb */
197 [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
198 30900, 11, 108 }, /* 54 Mb */
199 [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
200 6400, 0, 0 }, /* 6.5 Mb */
201 [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
202 12700, 1, 1 }, /* 13 Mb */
203 [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
204 18800, 2, 2 }, /* 19.5 Mb*/
205 [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
206 25000, 3, 3 }, /* 26 Mb */
207 [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
208 36700, 4, 4 }, /* 39 Mb */
209 [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
210 48100, 5, 5 }, /* 52 Mb */
211 [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
212 53500, 6, 6 }, /* 58.5 Mb */
213 [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
214 59000, 7, 7 }, /* 65 Mb */
215 [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
216 65400, 7, 7 }, /* 65 Mb*/
217 [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
218 12700, 8, 8 }, /* 13 Mb */
219 [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
220 24800, 9, 9 }, /* 26 Mb */
221 [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
222 36600, 10, 10 }, /* 39 Mb */
223 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
224 48100, 11, 11 }, /* 52 Mb */
225 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
226 69500, 12, 12 }, /* 78 Mb */
227 [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
228 89500, 13, 13 }, /* 104 Mb */
229 [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
230 98900, 14, 14 }, /* 117 Mb */
231 [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
232 108300, 15, 15 }, /* 130 Mb */
233 [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
234 120000, 15, 15 }, /* 144.4 Mb */
235 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
236 17400, 16, 16 }, /* 19.5 Mb */
237 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
238 35100, 17, 17 }, /* 39 Mb */
239 [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
240 52600, 18, 18 }, /* 58.5 Mb */
241 [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
242 70400, 19, 19 }, /* 78 Mb */
243 [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
244 104900, 20, 20 }, /* 117 Mb */
245 [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
246 115800, 20, 20 }, /* 130 Mb */
247 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
248 137200, 21, 21 }, /* 156 Mb */
249 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
250 151100, 21, 21 }, /* 173.3 Mb */
251 [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
252 152800, 22, 22 }, /* 175.5 Mb */
253 [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
254 168400, 22, 22 }, /* 195 Mb */
255 [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
256 168400, 23, 23 }, /* 195 Mb */
257 [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
258 185000, 23, 23 }, /* 216.7 Mb */
259 [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
260 13200, 0, 0 }, /* 13.5 Mb */
261 [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
262 25900, 1, 1 }, /* 27.0 Mb */
263 [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
264 38600, 2, 2 }, /* 40.5 Mb */
265 [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
266 49800, 3, 3 }, /* 54 Mb */
267 [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
268 72200, 4, 4 }, /* 81 Mb */
269 [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
270 92900, 5, 5 }, /* 108 Mb */
271 [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
272 102700, 6, 6 }, /* 121.5 Mb */
273 [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
274 112000, 7, 7 }, /* 135 Mb */
275 [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
276 122000, 7, 7 }, /* 150 Mb */
277 [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
278 25800, 8, 8 }, /* 27 Mb */
279 [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
280 49800, 9, 9 }, /* 54 Mb */
281 [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
282 71900, 10, 10 }, /* 81 Mb */
283 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
284 92500, 11, 11 }, /* 108 Mb */
285 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
286 130300, 12, 12 }, /* 162 Mb */
287 [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
288 162800, 13, 13 }, /* 216 Mb */
289 [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
290 178200, 14, 14 }, /* 243 Mb */
291 [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
292 192100, 15, 15 }, /* 270 Mb */
293 [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
294 207000, 15, 15 }, /* 300 Mb */
295 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
296 36100, 16, 16 }, /* 40.5 Mb */
297 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
298 72900, 17, 17 }, /* 81 Mb */
299 [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
300 108300, 18, 18 }, /* 121.5 Mb */
301 [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
302 142000, 19, 19 }, /* 162 Mb */
303 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
304 205100, 20, 20 }, /* 243 Mb */
305 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
306 224700, 20, 20 }, /* 270 Mb */
307 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
308 263100, 21, 21 }, /* 324 Mb */
309 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
310 288000, 21, 21 }, /* 360 Mb */
311 [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
312 290700, 22, 22 }, /* 364.5 Mb */
313 [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
314 317200, 22, 22 }, /* 405 Mb */
315 [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
316 317200, 23, 23 }, /* 405 Mb */
317 [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
318 346400, 23, 23 }, /* 450 Mb */
319 },
320 50, /* probe interval */
321 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
322};
323
/*
 * 802.11a (5 GHz legacy) rate table: the 8 OFDM rates, 6-54 Mb/s.
 * Per entry: rate flags, phy type, nominal rate (kb/s), expected
 * user-level throughput (kb/s), then two rate identifiers --
 * NOTE(review): which of the trailing fields is the h/w ratecode vs.
 * the 802.11 rate value is defined by struct ath_rate_table in rc.h;
 * confirm the order there.
 */
static const struct ath_rate_table ar5416_11a_ratetable = {
	8,	/* number of entries */
	0,
	{
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
			5400, 0, 12},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
			7800, 1, 18},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
			10000, 2, 24},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
			13900, 3, 36},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
			17300, 4, 48},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
			23000, 5, 72},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
			27400, 6, 96},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
			29300, 7, 108},
	},
	50,  /* probe interval */
	0,   /* Phy rates allowed initially */
};
348
/*
 * 802.11g (2.4 GHz legacy) rate table: 4 CCK rates (1-11 Mb/s) followed
 * by the 8 OFDM rates.  The 6 and 9 Mb/s OFDM entries are RC_INVALID,
 * i.e. never selected as starting rates (only reachable via the table,
 * not marked legacy-valid).  Field layout as in ar5416_11a_ratetable.
 */
static const struct ath_rate_table ar5416_11g_ratetable = {
	12,	/* number of entries */
	0,
	{
		{ RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
			900, 0, 2},
		{ RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
			1900, 1, 4},
		{ RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
			4900, 2, 11},
		{ RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
			8100, 3, 22},
		{ RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
			5400, 4, 12},
		{ RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
			7800, 5, 18},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
			10000, 6, 24},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
			13900, 7, 36},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
			17300, 8, 48},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
			23000, 9, 72},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
			27400, 10, 96},
		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
			29300, 11, 108},
	},
	50,  /* probe interval */
	0,   /* Phy rates allowed initially */
};
381
/*
 * Map a mac80211 ieee80211_tx_rate back onto an index into the driver's
 * rate table.  Legacy (non-MCS) rates index the table directly; HT rates
 * are located among the currently valid rates by hardware ratecode.
 */
static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
				struct ieee80211_tx_rate *rate)
{
	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
	int rix, i, idx = 0;

	/* For legacy rates, rate->idx is already the table index. */
	if (!(rate->flags & IEEE80211_TX_RC_MCS))
		return rate->idx;

	/* Search the valid rates for an HT entry matching this MCS code. */
	for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
		idx = ath_rc_priv->valid_rate_index[i];

		if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
		    rate_table->info[idx].ratecode == rate->idx)
			break;
	}

	/* If no match was found, idx is the last valid index scanned. */
	rix = idx;

	/*
	 * NOTE(review): the short-GI variant is assumed to sit at the very
	 * next table index after its normal-GI entry -- confirm this holds
	 * for every SGI-capable rate in the tables.
	 */
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rix++;

	return rix;
}
406
407static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
408{
409 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
410 u8 i, j, idx, idx_next;
411
412 for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
413 for (j = 0; j <= i-1; j++) {
414 idx = ath_rc_priv->valid_rate_index[j];
415 idx_next = ath_rc_priv->valid_rate_index[j+1];
416
417 if (rate_table->info[idx].ratekbps >
418 rate_table->info[idx_next].ratekbps) {
419 ath_rc_priv->valid_rate_index[j] = idx_next;
420 ath_rc_priv->valid_rate_index[j+1] = idx;
421 }
422 }
423 }
424}
425
426static inline
427int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
428 struct ath_rate_priv *ath_rc_priv,
429 u8 cur_valid_txrate,
430 u8 *next_idx)
431{
432 u8 i;
433
434 for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
435 if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
436 *next_idx = ath_rc_priv->valid_rate_index[i+1];
437 return 1;
438 }
439 }
440
441 /* No more valid rates */
442 *next_idx = 0;
443
444 return 0;
445}
446
/*
 * Check whether a phy mode is usable given the peer's negotiated
 * capability flags: HT support, stream count (dual/triple), short GI,
 * and -- unless @ignore_cw is set -- 40 MHz channel width.
 * Returns 1 if allowed, 0 otherwise.
 */
static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
{
	if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
		return 0;
	if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
		return 0;
	if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
		return 0;
	if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
		return 0;
	/* Channel-width check applies to HT phys only. */
	if (!ignore_cw && WLAN_RC_PHY_HT(phy))
		if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
			return 0;
	return 1;
}
464
465static inline int
466ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
467 u8 cur_valid_txrate, u8 *next_idx)
468{
469 int8_t i;
470
471 for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
472 if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
473 *next_idx = ath_rc_priv->valid_rate_index[i-1];
474 return 1;
475 }
476 }
477
478 return 0;
479}
480
/*
 * Seed the valid-rate bookkeeping from the table's legacy (RC_LEGACY)
 * entries; used when no rate set was negotiated with the peer.
 * Returns the highest table index that was marked valid.
 */
static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
{
	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
	u8 i, hi = 0;

	for (i = 0; i < rate_table->rate_cnt; i++) {
		if (rate_table->info[i].rate_flags & RC_LEGACY) {
			u32 phy = rate_table->info[i].phy;
			u8 valid_rate_count = 0;

			/* Skip phys not permitted by our capabilities. */
			if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
				continue;

			valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];

			/* Record per-phy and mark the index globally valid. */
			ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
			ath_rc_priv->valid_phy_ratecnt[phy] += 1;
			ath_rc_priv->valid_rate_index[i] = true;
			hi = i;
		}
	}

	return hi;
}
505
506static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
507 u32 phy, u32 capflag)
508{
509 if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
510 return false;
511
512 if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
513 return false;
514
515 if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
516 return false;
517
518 return true;
519}
520
521static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
522 u32 phy, u32 capflag)
523{
524 if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
525 return false;
526
527 if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
528 return false;
529
530 if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
531 return false;
532
533 return true;
534}
535
/*
 * Intersect a negotiated rate set (legacy rates when @legacy, otherwise
 * the negotiated HT MCS set) with the driver's rate table, populating
 * the per-phy valid-rate lists and the global valid_rate_index[] map.
 * Returns the highest matching table index.
 */
static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
{
	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
	struct ath_rateset *rateset;
	u32 phy, capflag = ath_rc_priv->ht_cap;
	u16 rate_flags;
	u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;

	if (legacy)
		rateset = &ath_rc_priv->neg_rates;
	else
		rateset = &ath_rc_priv->neg_ht_rates;

	/* For each negotiated rate, find matching table entries. */
	for (i = 0; i < rateset->rs_nrates; i++) {
		for (j = 0; j < rate_table->rate_cnt; j++) {
			phy = rate_table->info[j].phy;
			rate_flags = rate_table->info[j].rate_flags;
			rate = rateset->rs_rates[i];
			dot11rate = rate_table->info[j].dot11rate;

			if (legacy &&
			    !ath_rc_check_legacy(rate, dot11rate,
						 rate_flags, phy, capflag))
				continue;

			if (!legacy &&
			    !ath_rc_check_ht(rate, dot11rate,
					     rate_flags, phy, capflag))
				continue;

			if (!ath_rc_valid_phyrate(phy, capflag, 0))
				continue;

			/* Record per-phy and mark globally valid. */
			valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
			ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
			ath_rc_priv->valid_phy_ratecnt[phy] += 1;
			ath_rc_priv->valid_rate_index[j] = true;
			hi = max(hi, j);
		}
	}

	return hi;
}
579
/*
 * Choose the best rate index for the next frame: the valid rate with
 * the highest expected throughput (rate * (100 - PER)), capped at
 * rate_max_phy.  Occasionally probes one rate above rate_max_phy,
 * setting *is_probing when the returned rate is such a probe.
 */
static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
				 int *is_probing)
{
	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
	u32 best_thruput, this_thruput, now_msec;
	u8 rate, next_rate, best_rate, maxindex, minindex;
	int8_t index = 0;

	now_msec = jiffies_to_msecs(jiffies);
	*is_probing = 0;
	best_thruput = 0;
	maxindex = ath_rc_priv->max_valid_rate-1;
	minindex = 0;
	best_rate = minindex;

	/*
	 * Try the higher rate first. It will reduce memory moving time
	 * if we have very good channel characteristics.
	 */
	for (index = maxindex; index >= minindex ; index--) {
		u8 per_thres;

		rate = ath_rc_priv->valid_rate_index[index];
		/* Never go above the currently allowed maximum. */
		if (rate > ath_rc_priv->rate_max_phy)
			continue;

		/*
		 * For TCP the average collision rate is around 11%,
		 * so we ignore PERs less than this.  This is to
		 * prevent the rate we are currently using (whose
		 * PER might be in the 10-15 range because of TCP
		 * collisions) looking worse than the next lower
		 * rate whose PER has decayed close to 0.  If we
		 * used to next lower rate, its PER would grow to
		 * 10-15 and we would be worse off then staying
		 * at the current rate.
		 */
		per_thres = ath_rc_priv->per[rate];
		if (per_thres < 12)
			per_thres = 12;

		this_thruput = rate_table->info[rate].user_ratekbps *
			(100 - per_thres);

		/* "<=" prefers the later (lower) rate on exact ties. */
		if (best_thruput <= this_thruput) {
			best_thruput = this_thruput;
			best_rate    = rate;
		}
	}

	rate = best_rate;

	/*
	 * Must check the actual rate (ratekbps) to account for
	 * non-monoticity of 11g's rate table
	 */

	if (rate >= ath_rc_priv->rate_max_phy) {
		rate = ath_rc_priv->rate_max_phy;

		/* Probe the next allowed phy state */
		if (ath_rc_get_nextvalid_txrate(rate_table,
						ath_rc_priv, rate, &next_rate) &&
		    (now_msec - ath_rc_priv->probe_time >
		     rate_table->probe_interval) &&
		    (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
			rate = next_rate;
			ath_rc_priv->probe_rate = rate;
			ath_rc_priv->probe_time = now_msec;
			ath_rc_priv->hw_maxretry_pktcnt = 0;
			*is_probing = 1;
		}
	}

	/* Clamp to the active table size. */
	if (rate > (ath_rc_priv->rate_table_size - 1))
		rate = ath_rc_priv->rate_table_size - 1;

	/* Sanity: the chosen rate's stream count must be supported. */
	if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
	    (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
		return rate;

	if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
	    (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
		return rate;

	if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
		return rate;

	/* This should not happen */
	WARN_ON_ONCE(1);

	/* Fall back to the lowest valid rate. */
	rate = ath_rc_priv->valid_rate_index[0];

	return rate;
}
675
676static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
677 struct ieee80211_tx_rate *rate,
678 struct ieee80211_tx_rate_control *txrc,
679 u8 tries, u8 rix, int rtsctsenable)
680{
681 rate->count = tries;
682 rate->idx = rate_table->info[rix].ratecode;
683
684 if (txrc->rts || rtsctsenable)
685 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
686
687 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
688 rate->flags |= IEEE80211_TX_RC_MCS;
689 if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
690 conf_is_ht40(&txrc->hw->conf))
691 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
692 if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
693 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
694 }
695}
696
/*
 * Pick the RTS/CTS rate for an HT frame: the lowest rate in the BSS's
 * basic rate set.  Does nothing without a vif, for legacy first rates
 * (mac80211 handles their protection), or without basic rates.
 */
static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
				   const struct ath_rate_table *rate_table,
				   struct ieee80211_tx_info *tx_info)
{
	struct ieee80211_bss_conf *bss_conf;

	if (!tx_info->control.vif)
		return;
	/*
	 * For legacy frames, mac80211 takes care of CTS protection.
	 */
	if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
		return;

	bss_conf = &tx_info->control.vif->bss_conf;

	if (!bss_conf->basic_rates)
		return;

	/*
	 * For now, use the lowest allowed basic rate for HT frames.
	 */
	tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
}
721
/*
 * mac80211 .get_rate hook: build the 4-entry multi-rate-retry series in
 * tx_info->control.rates[] for one frame.  Entry 0 is the chosen (or
 * probe) rate; subsequent entries step down through the valid rates.
 */
static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
			 struct ieee80211_tx_rate_control *txrc)
{
	struct ath_softc *sc = priv;
	struct ath_rate_priv *ath_rc_priv = priv_sta;
	const struct ath_rate_table *rate_table;
	struct sk_buff *skb = txrc->skb;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rates = tx_info->control.rates;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 try_per_rate, i = 0, rix;
	int is_probe = 0;

	/* Let mac80211 handle frames that should go at the lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	/*
	 * For Multi Rate Retry we use a different number of
	 * retry attempt counts. This ends up looking like this:
	 *
	 *   MRR[0] = 4
	 *   MRR[1] = 4
	 *   MRR[2] = 4
	 *   MRR[3] = 8
	 *
	 */
	try_per_rate = 4;

	rate_table = ath_rc_priv->rate_table;
	rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);

	/* Advertise LDPC / STBC to the hardware when the peer supports it. */
	if (conf_is_ht(&sc->hw->conf) &&
	    (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
		tx_info->flags |= IEEE80211_TX_CTL_LDPC;

	if (conf_is_ht(&sc->hw->conf) &&
	    (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
		tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);

	if (is_probe) {
		/*
		 * Set one try for probe rates. For the
		 * probes don't enable RTS.
		 */
		ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
				       1, rix, 0);
		/*
		 * Get the next tried/allowed rate.
		 * No RTS for the next series after the probe rate.
		 */
		ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
		ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
				       try_per_rate, rix, 0);

		tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
	} else {
		/*
		 * Set the chosen rate. No RTS for first series entry.
		 */
		ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
				       try_per_rate, rix, 0);
	}

	/* Fill the remaining entries with successively lower rates. */
	for ( ; i < 4; i++) {
		/*
		 * Use twice the number of tries for the last MRR segment.
		 */
		if (i + 1 == 4)
			try_per_rate = 8;

		ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);

		/*
		 * All other rates in the series have RTS enabled.
		 */
		ath_rc_rate_set_series(rate_table, &rates[i], txrc,
				       try_per_rate, rix, 1);
	}

	/*
	 * NB:Change rate series to enable aggregation when operating
	 * at lower MCS rates. When first rate in series is MCS2
	 * in HT40 @ 2.4GHz, series should look like:
	 *
	 * {MCS2, MCS1, MCS0, MCS0}.
	 *
	 * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
	 * look like:
	 *
	 * {MCS3, MCS2, MCS1, MCS1}
	 *
	 * So, set fourth rate in series to be same as third one for
	 * above conditions.
	 */
	if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
	    (conf_is_ht(&sc->hw->conf))) {
		u8 dot11rate = rate_table->info[rix].dot11rate;
		u8 phy = rate_table->info[rix].phy;
		if (i == 4 &&
		    ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
		     (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
			rates[3].idx = rates[2].idx;
			rates[3].flags = rates[2].flags;
		}
	}

	/*
	 * Force hardware to use computed duration for next
	 * fragment by disabling multi-rate retry, which
	 * updates duration based on the multi-rate duration table.
	 *
	 * FIXME: Fix duration
	 */
	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		rates[0].count = ATH_TXMAXTRY;
	}

	ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
}
845
/*
 * Update the smoothed packet error rate for @tx_rate after a tx
 * completion, using an EWMA of new_PER = 7/8 * old + 1/8 * current.
 * Also handles probe bookkeeping (success raises rate_max_phy) and the
 * hw_maxretry_pktcnt counter used to gate probing.
 *
 * @xretries: 0 = frame eventually acked at this (final) rate,
 *            1 = frame never acked, 2 = intermediate MRR stage
 *            (caller passes 2 for non-final rates of an acked frame).
 */
static void ath_rc_update_per(struct ath_softc *sc,
			      const struct ath_rate_table *rate_table,
			      struct ath_rate_priv *ath_rc_priv,
			      struct ieee80211_tx_info *tx_info,
			      int tx_rate, int xretries, int retries,
			      u32 now_msec)
{
	int count, n_bad_frames;
	u8 last_per;
	/* PER (%) implied by N retries before success: 100*N/(N+1). */
	static const u32 nretry_to_per_lookup[10] = {
		100 * 0 / 1,
		100 * 1 / 4,
		100 * 1 / 2,
		100 * 3 / 4,
		100 * 4 / 5,
		100 * 5 / 6,
		100 * 6 / 7,
		100 * 7 / 8,
		100 * 8 / 9,
		100 * 9 / 10
	};

	last_per = ath_rc_priv->per[tx_rate];
	n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;

	if (xretries) {
		if (xretries == 1) {
			/* Unacked frame: penalize heavily, capped at 100%. */
			ath_rc_priv->per[tx_rate] += 30;
			if (ath_rc_priv->per[tx_rate] > 100)
				ath_rc_priv->per[tx_rate] = 100;
		} else {
			/* xretries == 2 */
			count = ARRAY_SIZE(nretry_to_per_lookup);
			if (retries >= count)
				retries = count - 1;

			/* new_PER = 7/8*old_PER + 1/8*(currentPER) */
			ath_rc_priv->per[tx_rate] =
				(u8)(last_per - (last_per >> 3) + (100 >> 3));
		}

		/* xretries == 1 or 2 */

		/* A failed rate cannot remain the probe candidate. */
		if (ath_rc_priv->probe_rate == tx_rate)
			ath_rc_priv->probe_rate = 0;

	} else { /* xretries == 0 */
		count = ARRAY_SIZE(nretry_to_per_lookup);
		if (retries >= count)
			retries = count - 1;

		if (n_bad_frames) {
			/* new_PER = 7/8*old_PER + 1/8*(currentPER)
			 * Assuming that n_frames is not 0.  The current PER
			 * from the retries is 100 * retries / (retries+1),
			 * since the first retries attempts failed, and the
			 * next one worked.  For the one that worked,
			 * n_bad_frames subframes out of n_frames wored,
			 * so the PER for that part is
			 * 100 * n_bad_frames / n_frames, and it contributes
			 * 100 * n_bad_frames / (n_frames * (retries+1)) to
			 * the above PER.  The expression below is a
			 * simplified version of the sum of these two terms.
			 */
			if (tx_info->status.ampdu_len > 0) {
				int n_frames, n_bad_tries;
				u8 cur_per, new_per;

				n_bad_tries = retries * tx_info->status.ampdu_len +
					n_bad_frames;
				n_frames = tx_info->status.ampdu_len * (retries + 1);
				cur_per = (100 * n_bad_tries / n_frames) >> 3;
				new_per = (u8)(last_per - (last_per >> 3) + cur_per);
				ath_rc_priv->per[tx_rate] = new_per;
			}
		} else {
			/* Clean A-MPDU: PER from the retry count alone. */
			ath_rc_priv->per[tx_rate] =
				(u8)(last_per - (last_per >> 3) +
				     (nretry_to_per_lookup[retries] >> 3));
		}


		/*
		 * If we got at most one retry then increase the max rate if
		 * this was a probe.  Otherwise, ignore the probe.
		 */
		if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
			if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
				/*
				 * Since we probed with just a single attempt,
				 * any retries means the probe failed.  Also,
				 * if the attempt worked, but more than half
				 * the subframes were bad then also consider
				 * the probe a failure.
				 */
				ath_rc_priv->probe_rate = 0;
			} else {
				u8 probe_rate = 0;

				/* Probe succeeded: raise the allowed max. */
				ath_rc_priv->rate_max_phy =
					ath_rc_priv->probe_rate;
				probe_rate = ath_rc_priv->probe_rate;

				if (ath_rc_priv->per[probe_rate] > 30)
					ath_rc_priv->per[probe_rate] = 20;

				ath_rc_priv->probe_rate = 0;

				/*
				 * Since this probe succeeded, we allow the next
				 * probe twice as soon.  This allows the maxRate
				 * to move up faster if the probes are
				 * successful.
				 */
				ath_rc_priv->probe_time =
					now_msec - rate_table->probe_interval / 2;
			}
		}

		if (retries > 0) {
			/*
			 * Don't update anything.  We don't know if
			 * this was because of collisions or poor signal.
			 */
			ath_rc_priv->hw_maxretry_pktcnt = 0;
		} else {
			/*
			 * It worked with no retries.  First ignore bogus (small)
			 * rssi_ack values.
			 */
			if (tx_rate == ath_rc_priv->rate_max_phy &&
			    ath_rc_priv->hw_maxretry_pktcnt < 255) {
				ath_rc_priv->hw_maxretry_pktcnt++;
			}

		}
	}
}
984
985static void ath_rc_update_ht(struct ath_softc *sc,
986 struct ath_rate_priv *ath_rc_priv,
987 struct ieee80211_tx_info *tx_info,
988 int tx_rate, int xretries, int retries)
989{
990 u32 now_msec = jiffies_to_msecs(jiffies);
991 int rate;
992 u8 last_per;
993 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
994 int size = ath_rc_priv->rate_table_size;
995
996 if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
997 return;
998
999 last_per = ath_rc_priv->per[tx_rate];
1000
1001 /* Update PER first */
1002 ath_rc_update_per(sc, rate_table, ath_rc_priv,
1003 tx_info, tx_rate, xretries,
1004 retries, now_msec);
1005
1006 /*
1007 * If this rate looks bad (high PER) then stop using it for
1008 * a while (except if we are probing).
1009 */
1010 if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
1011 rate_table->info[tx_rate].ratekbps <=
1012 rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
1013 ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
1014 &ath_rc_priv->rate_max_phy);
1015
1016 /* Don't probe for a little while. */
1017 ath_rc_priv->probe_time = now_msec;
1018 }
1019
1020 /* Make sure the rates below this have lower PER */
1021 /* Monotonicity is kept only for rates below the current rate. */
1022 if (ath_rc_priv->per[tx_rate] < last_per) {
1023 for (rate = tx_rate - 1; rate >= 0; rate--) {
1024
1025 if (ath_rc_priv->per[rate] >
1026 ath_rc_priv->per[rate+1]) {
1027 ath_rc_priv->per[rate] =
1028 ath_rc_priv->per[rate+1];
1029 }
1030 }
1031 }
1032
1033 /* Maintain monotonicity for rates above the current rate */
1034 for (rate = tx_rate; rate < size - 1; rate++) {
1035 if (ath_rc_priv->per[rate+1] <
1036 ath_rc_priv->per[rate])
1037 ath_rc_priv->per[rate+1] =
1038 ath_rc_priv->per[rate];
1039 }
1040
1041 /* Every so often, we reduce the thresholds
1042 * and PER (different for CCK and OFDM). */
1043 if (now_msec - ath_rc_priv->per_down_time >=
1044 rate_table->probe_interval) {
1045 for (rate = 0; rate < size; rate++) {
1046 ath_rc_priv->per[rate] =
1047 7 * ath_rc_priv->per[rate] / 8;
1048 }
1049
1050 ath_rc_priv->per_down_time = now_msec;
1051 }
1052
1053 ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
1054 ath_rc_priv->per[tx_rate]);
1055
1056}
1057
/*
 * Process one frame's tx status: walk the used MRR entries, update the
 * statistics for each intermediate rate, then for the final rate.
 * Skips updates for 40 MHz rates after a 40->20 MHz mode switch.
 */
static void ath_rc_tx_status(struct ath_softc *sc,
			     struct ath_rate_priv *ath_rc_priv,
			     struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rates = tx_info->status.rates;
	struct ieee80211_tx_rate *rate;
	int final_ts_idx = 0, xretries = 0, long_retry = 0;
	u8 flags;
	u32 i = 0, rix;

	/* Find the last MRR entry that was actually attempted. */
	for (i = 0; i < sc->hw->max_rates; i++) {
		rate = &tx_info->status.rates[i];
		if (rate->idx < 0 || !rate->count)
			break;

		final_ts_idx = i;
		long_retry = rate->count - 1;
	}

	if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
		xretries = 1;

	/*
	 * If the first rate is not the final index, there
	 * are intermediate rate failures to be processed.
	 */
	if (final_ts_idx != 0) {
		for (i = 0; i < final_ts_idx ; i++) {
			if (rates[i].count != 0 && (rates[i].idx >= 0)) {
				flags = rates[i].flags;

				/* If HT40 and we have switched mode from
				 * 40 to 20 => don't update */

				if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
				    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
					return;

				/* Intermediate stages pass xretries 1 or 2. */
				rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
				ath_rc_update_ht(sc, ath_rc_priv, tx_info,
						 rix, xretries ? 1 : 2,
						 rates[i].count);
			}
		}
	}

	flags = rates[final_ts_idx].flags;

	/* If HT40 and we have switched mode from 40 to 20 => don't update */
	if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
	    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
		return;

	rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
	ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
	ath_debug_stat_rc(ath_rc_priv, rix);
}
1116
1117static const
1118struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1119 enum ieee80211_band band,
1120 bool is_ht)
1121{
1122 switch(band) {
1123 case IEEE80211_BAND_2GHZ:
1124 if (is_ht)
1125 return &ar5416_11ng_ratetable;
1126 return &ar5416_11g_ratetable;
1127 case IEEE80211_BAND_5GHZ:
1128 if (is_ht)
1129 return &ar5416_11na_ratetable;
1130 return &ar5416_11a_ratetable;
1131 default:
1132 return NULL;
1133 }
1134}
1135
/*
 * (Re)build all per-station rate-control state from the negotiated rate
 * sets: reset PER and validity arrays, mark valid rates, flatten them
 * into a sorted valid_rate_index[] list, and pick the initial
 * rate_max_phy (the 4th-highest valid rate when there are more than 4).
 */
static void ath_rc_init(struct ath_softc *sc,
			struct ath_rate_priv *ath_rc_priv)
{
	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
	struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 i, j, k, hi = 0, hthi = 0;

	ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;

	/* Clear PER history and validity flags. */
	for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
		ath_rc_priv->per[i] = 0;
		ath_rc_priv->valid_rate_index[i] = 0;
	}

	for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
		for (j = 0; j < RATE_TABLE_SIZE; j++)
			ath_rc_priv->valid_phy_rateidx[i][j] = 0;
		ath_rc_priv->valid_phy_ratecnt[i] = 0;
	}

	if (!rateset->rs_nrates) {
		/* No negotiated rates: fall back to the legacy entries. */
		hi = ath_rc_init_validrates(ath_rc_priv);
	} else {
		hi = ath_rc_setvalid_rates(ath_rc_priv, true);

		if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
			hthi = ath_rc_setvalid_rates(ath_rc_priv, false);

		hi = max(hi, hthi);
	}

	ath_rc_priv->rate_table_size = hi + 1;
	ath_rc_priv->rate_max_phy = 0;
	WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);

	/* Flatten the per-phy lists into valid_rate_index[]. */
	for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
		for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
			ath_rc_priv->valid_rate_index[k++] =
				ath_rc_priv->valid_phy_rateidx[i][j];
		}

		if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
		    !ath_rc_priv->valid_phy_ratecnt[i])
			continue;

		/* Highest rate of the last eligible phy. */
		ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
	}
	WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
	WARN_ON(k > RATE_TABLE_SIZE);

	ath_rc_priv->max_valid_rate = k;
	ath_rc_sort_validrates(ath_rc_priv);
	/* Start a few steps below the top so probing can work upward. */
	ath_rc_priv->rate_max_phy = (k > 4) ?
		ath_rc_priv->valid_rate_index[k-4] :
		ath_rc_priv->valid_rate_index[k-1];

	ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
		ath_rc_priv->ht_cap);
}
1196
1197static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
1198{
1199 u8 caps = 0;
1200
1201 if (sta->ht_cap.ht_supported) {
1202 caps = WLAN_RC_HT_FLAG;
1203 if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
1204 caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
1205 else if (sta->ht_cap.mcs.rx_mask[1])
1206 caps |= WLAN_RC_DS_FLAG;
1207 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
1208 caps |= WLAN_RC_40_FLAG;
1209 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
1210 caps |= WLAN_RC_SGI_FLAG;
1211 } else {
1212 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
1213 caps |= WLAN_RC_SGI_FLAG;
1214 }
1215 }
1216
1217 return caps;
1218}
1219
/*
 * True when aggregation may be started on @tidno: the peer supports HT
 * and no block-ack session is already active for that TID.
 */
static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
			      u8 tidno)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid;

	if (!sta->ht_cap.ht_supported)
		return false;

	txtid = ATH_AN_2_TID(an, tidno);
	return !txtid->active;
}
1232
1233
1234/***********************************/
1235/* mac80211 Rate Control callbacks */
1236/***********************************/
1237
/*
 * mac80211 .tx_status hook: feed data-frame completions into the rate
 * statistics and opportunistically kick off a BA session for QoS data
 * TIDs (excluding EAPOL and voice-queue traffic).
 */
static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
			  struct ieee80211_sta *sta, void *priv_sta,
			  struct sk_buff *skb)
{
	struct ath_softc *sc = priv;
	struct ath_rate_priv *ath_rc_priv = priv_sta;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;

	if (!priv_sta || !ieee80211_is_data(fc))
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/* Filtered frames carry no useful rate feedback. */
	if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
		return;

	ath_rc_tx_status(sc, ath_rc_priv, skb);

	/* Check if aggregation has to be enabled for this tid */
	if (conf_is_ht(&sc->hw->conf) &&
	    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
		if (ieee80211_is_data_qos(fc) &&
		    skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
			u8 *qc, tid;

			qc = ieee80211_get_qos_ctl(hdr);
			tid = qc[0] & 0xf;

			if(ath_tx_aggr_check(sc, sta, tid))
				ieee80211_start_tx_ba_session(sta, tid, 0);
		}
	}
}
1276
/*
 * mac80211 .rate_init hook: record the peer's negotiated legacy rates
 * (in 500 kb/s units) and HT MCS indices, pick the matching rate table,
 * and initialize all rate-control state for the station.
 */
static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
			  struct cfg80211_chan_def *chandef,
			  struct ieee80211_sta *sta, void *priv_sta)
{
	struct ath_softc *sc = priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_rate_priv *ath_rc_priv = priv_sta;
	int i, j = 0;
	u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sta->supp_rates[sband->band] & BIT(i)) {
			/* Skip rates incompatible with the channel flags. */
			if ((rate_flags & sband->bitrates[i].flags)
			    != rate_flags)
				continue;

			/* bitrate is in 100 kb/s; store in 500 kb/s units. */
			ath_rc_priv->neg_rates.rs_rates[j]
				= (sband->bitrates[i].bitrate * 2) / 10;
			j++;
		}
	}
	ath_rc_priv->neg_rates.rs_nrates = j;

	if (sta->ht_cap.ht_supported) {
		/* Collect supported MCS indices (802.11n defines MCS 0-76). */
		for (i = 0, j = 0; i < 77; i++) {
			if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
				ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
			if (j == ATH_RATE_MAX)
				break;
		}
		ath_rc_priv->neg_ht_rates.rs_nrates = j;
	}

	ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
							sta->ht_cap.ht_supported);
	if (!ath_rc_priv->rate_table) {
		ath_err(common, "No rate table chosen\n");
		return;
	}

	ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
	ath_rc_init(sc, priv_sta);
}
1320
1321static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1322 struct cfg80211_chan_def *chandef,
1323 struct ieee80211_sta *sta, void *priv_sta,
1324 u32 changed)
1325{
1326 struct ath_softc *sc = priv;
1327 struct ath_rate_priv *ath_rc_priv = priv_sta;
1328
1329 if (changed & IEEE80211_RC_BW_CHANGED) {
1330 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
1331 ath_rc_init(sc, priv_sta);
1332
1333 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1334 "Operating Bandwidth changed to: %d\n",
1335 sc->hw->conf.chandef.width);
1336 }
1337}
1338
1339#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
1340
1341void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1342{
1343 struct ath_rc_stats *stats;
1344
1345 stats = &rc->rcstats[final_rate];
1346 stats->success++;
1347}
1348
1349void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
1350 int xretries, int retries, u8 per)
1351{
1352 struct ath_rc_stats *stats = &rc->rcstats[rix];
1353
1354 stats->xretries += xretries;
1355 stats->retries += retries;
1356 stats->per = per;
1357}
1358
1359static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1360 size_t count, loff_t *ppos)
1361{
1362 struct ath_rate_priv *rc = file->private_data;
1363 char *buf;
1364 unsigned int len = 0, max;
1365 int rix;
1366 ssize_t retval;
1367
1368 if (rc->rate_table == NULL)
1369 return 0;
1370
1371 max = 80 + rc->rate_table_size * 1024 + 1;
1372 buf = kmalloc(max, GFP_KERNEL);
1373 if (buf == NULL)
1374 return -ENOMEM;
1375
1376 len += sprintf(buf, "%6s %6s %6s "
1377 "%10s %10s %10s %10s\n",
1378 "HT", "MCS", "Rate",
1379 "Success", "Retries", "XRetries", "PER");
1380
1381 for (rix = 0; rix < rc->max_valid_rate; rix++) {
1382 u8 i = rc->valid_rate_index[rix];
1383 u32 ratekbps = rc->rate_table->info[i].ratekbps;
1384 struct ath_rc_stats *stats = &rc->rcstats[i];
1385 char mcs[5];
1386 char htmode[5];
1387 int used_mcs = 0, used_htmode = 0;
1388
1389 if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
1390 used_mcs = scnprintf(mcs, 5, "%d",
1391 rc->rate_table->info[i].ratecode);
1392
1393 if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
1394 used_htmode = scnprintf(htmode, 5, "HT40");
1395 else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
1396 used_htmode = scnprintf(htmode, 5, "HT20");
1397 else
1398 used_htmode = scnprintf(htmode, 5, "????");
1399 }
1400
1401 mcs[used_mcs] = '\0';
1402 htmode[used_htmode] = '\0';
1403
1404 len += scnprintf(buf + len, max - len,
1405 "%6s %6s %3u.%d: "
1406 "%10u %10u %10u %10u\n",
1407 htmode,
1408 mcs,
1409 ratekbps / 1000,
1410 (ratekbps % 1000) / 100,
1411 stats->success,
1412 stats->retries,
1413 stats->xretries,
1414 stats->per);
1415 }
1416
1417 if (len > max)
1418 len = max;
1419
1420 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1421 kfree(buf);
1422 return retval;
1423}
1424
/* File operations for the read-only per-station "rc_stats" debugfs file. */
static const struct file_operations fops_rcstat = {
	.read = read_file_rcstat,
	.open = simple_open,
	.owner = THIS_MODULE
};
1430
/* Create the per-station "rc_stats" debugfs entry under @dir. */
static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
				     struct dentry *dir)
{
	struct ath_rate_priv *rc = priv_sta;
	rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
						  dir, rc, &fops_rcstat);
}
1438
/* Remove the per-station "rc_stats" debugfs entry. */
static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
{
	struct ath_rate_priv *rc = priv_sta;
	debugfs_remove(rc->debugfs_rcstats);
}
1444
1445#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
1446
/* mac80211 .alloc hook: the driver's ath_softc doubles as the context. */
static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return hw->priv;
}
1451
/* mac80211 .free hook: nothing to free, the context is the ath_softc. */
static void ath_rate_free(void *priv)
{
}
1456
/* mac80211 .alloc_sta hook: zeroed per-station rate-control state. */
static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	return kzalloc(sizeof(struct ath_rate_priv), gfp);
}
1461
/* mac80211 .free_sta hook: release the per-station state. */
static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
			      void *priv_sta)
{
	kfree(priv_sta);
}
1468
/*
 * mac80211 rate-control algorithm hooks, registered under the name
 * "ath9k_rate_control".  Debugfs hooks are compiled in only when both
 * mac80211 and ath9k debugfs support are enabled.
 */
static struct rate_control_ops ath_rate_ops = {
	.module = NULL,
	.name = "ath9k_rate_control",
	.tx_status = ath_tx_status,
	.get_rate = ath_get_rate,
	.rate_init = ath_rate_init,
	.rate_update = ath_rate_update,
	.alloc = ath_rate_alloc,
	.free = ath_rate_free,
	.alloc_sta = ath_rate_alloc_sta,
	.free_sta = ath_rate_free_sta,

#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
	.add_sta_debugfs = ath_rate_add_sta_debugfs,
	.remove_sta_debugfs = ath_rate_remove_sta_debugfs,
#endif
};
1486
/* Register the ath9k rate-control algorithm with mac80211. */
int ath_rate_control_register(void)
{
	return ieee80211_rate_control_register(&ath_rate_ops);
}
1491
/* Unregister the ath9k rate-control algorithm from mac80211. */
void ath_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&ath_rate_ops);
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
deleted file mode 100644
index b9a87383cb43..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008-2011 Atheros Communications Inc.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef RC_H
20#define RC_H
21
22#include "hw.h"
23
24struct ath_softc;
25
26#define ATH_RATE_MAX 30
27#define RATE_TABLE_SIZE 72
28
29#define RC_INVALID 0x0000
30#define RC_LEGACY 0x0001
31#define RC_SS 0x0002
32#define RC_DS 0x0004
33#define RC_TS 0x0008
34#define RC_HT_20 0x0010
35#define RC_HT_40 0x0020
36
37#define RC_STREAM_MASK 0xe
38#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
39 (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
40#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
41#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
42
43#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
44#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
45#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
46#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
47#define RC_HT_S_20 (RC_HT_20 | RC_SS)
48#define RC_HT_D_20 (RC_HT_20 | RC_DS)
49#define RC_HT_T_20 (RC_HT_20 | RC_TS)
50#define RC_HT_S_40 (RC_HT_40 | RC_SS)
51#define RC_HT_D_40 (RC_HT_40 | RC_DS)
52#define RC_HT_T_40 (RC_HT_40 | RC_TS)
53
54#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
55#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
56#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
57#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
58
59#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
60#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
61
62#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
63#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
64
65#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
66
67enum {
68 WLAN_RC_PHY_OFDM,
69 WLAN_RC_PHY_CCK,
70 WLAN_RC_PHY_HT_20_SS,
71 WLAN_RC_PHY_HT_20_DS,
72 WLAN_RC_PHY_HT_20_TS,
73 WLAN_RC_PHY_HT_40_SS,
74 WLAN_RC_PHY_HT_40_DS,
75 WLAN_RC_PHY_HT_40_TS,
76 WLAN_RC_PHY_HT_20_SS_HGI,
77 WLAN_RC_PHY_HT_20_DS_HGI,
78 WLAN_RC_PHY_HT_20_TS_HGI,
79 WLAN_RC_PHY_HT_40_SS_HGI,
80 WLAN_RC_PHY_HT_40_DS_HGI,
81 WLAN_RC_PHY_HT_40_TS_HGI,
82 WLAN_RC_PHY_MAX
83};
84
85#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
86 || (_phy == WLAN_RC_PHY_HT_40_DS) \
87 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
88 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
89#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
90 || (_phy == WLAN_RC_PHY_HT_40_TS) \
91 || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
92 || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
93#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
94 || (_phy == WLAN_RC_PHY_HT_20_DS) \
95 || (_phy == WLAN_RC_PHY_HT_20_TS) \
96 || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
97 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
98 || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
99#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
100 || (_phy == WLAN_RC_PHY_HT_40_DS) \
101 || (_phy == WLAN_RC_PHY_HT_40_TS) \
102 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
103 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
104 || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
105#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
106 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
107 || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
108 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
109 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
110 || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
111
112#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
113
114#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
115 ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
116
117#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
118 (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
119
120/* Return TRUE if flag supports HT20 && client supports HT20 or
121 * return TRUE if flag supports HT40 && client supports HT40.
122 * This is used becos some rates overlap between HT20/HT40.
123 */
124#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
125 (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
126 ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
127
128#define WLAN_RC_DS_FLAG (0x01)
129#define WLAN_RC_TS_FLAG (0x02)
130#define WLAN_RC_40_FLAG (0x04)
131#define WLAN_RC_SGI_FLAG (0x08)
132#define WLAN_RC_HT_FLAG (0x10)
133
134/**
135 * struct ath_rate_table - Rate Control table
136 * @rate_cnt: total number of rates for the given wireless mode
137 * @mcs_start: MCS rate index offset
138 * @rate_flags: Rate Control flags
139 * @phy: CCK/OFDM/HT20/HT40
140 * @ratekbps: rate in Kbits per second
141 * @user_ratekbps: user rate in Kbits per second
142 * @ratecode: rate that goes into HW descriptors
143 * @dot11rate: value that goes into supported
144 * rates info element of MLME
145 * @ctrl_rate: Index of next lower basic rate, used for duration computation
146 * @cw40index: Index of rates having 40MHz channel width
147 * @sgi_index: Index of rates having Short Guard Interval
148 * @ht_index: high throughput rates having 40MHz channel width and
149 * Short Guard Interval
150 * @probe_interval: interval for rate control to probe for other rates
151 * @initial_ratemax: initial ratemax value
152 */
153struct ath_rate_table {
154 int rate_cnt;
155 int mcs_start;
156 struct {
157 u16 rate_flags;
158 u8 phy;
159 u32 ratekbps;
160 u32 user_ratekbps;
161 u8 ratecode;
162 u8 dot11rate;
163 } info[RATE_TABLE_SIZE];
164 u32 probe_interval;
165 u8 initial_ratemax;
166};
167
168struct ath_rateset {
169 u8 rs_nrates;
170 u8 rs_rates[ATH_RATE_MAX];
171};
172
173struct ath_rc_stats {
174 u32 success;
175 u32 retries;
176 u32 xretries;
177 u8 per;
178};
179
180/**
181 * struct ath_rate_priv - Rate Control priv data
182 * @state: RC state
183 * @probe_rate: rate we are probing at
184 * @probe_time: msec timestamp for last probe
185 * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
186 * @max_valid_rate: maximum number of valid rate
187 * @per_down_time: msec timestamp for last PER down step
188 * @valid_phy_ratecnt: valid rate count
189 * @rate_max_phy: phy index for the max rate
190 * @per: PER for every valid rate in %
191 * @probe_interval: interval for ratectrl to probe for other rates
192 * @ht_cap: HT capabilities
193 * @neg_rates: Negotatied rates
194 * @neg_ht_rates: Negotiated HT rates
195 */
196struct ath_rate_priv {
197 u8 rate_table_size;
198 u8 probe_rate;
199 u8 hw_maxretry_pktcnt;
200 u8 max_valid_rate;
201 u8 valid_rate_index[RATE_TABLE_SIZE];
202 u8 ht_cap;
203 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
204 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
205 u8 rate_max_phy;
206 u8 per[RATE_TABLE_SIZE];
207 u32 probe_time;
208 u32 per_down_time;
209 u32 probe_interval;
210 struct ath_rateset neg_rates;
211 struct ath_rateset neg_ht_rates;
212 const struct ath_rate_table *rate_table;
213
214#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
215 struct dentry *debugfs_rcstats;
216 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
217#endif
218};
219
220#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
221void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
222void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
223 int xretries, int retries, u8 per);
224#else
225static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
226{
227}
228static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
229 int xretries, int retries, u8 per)
230{
231}
232#endif
233
234#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
235int ath_rate_control_register(void);
236void ath_rate_control_unregister(void);
237#else
238static inline int ath_rate_control_register(void)
239{
240 return 0;
241}
242
243static inline void ath_rate_control_unregister(void)
244{
245}
246#endif
247
248#endif /* RC_H */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 82e340d3ec60..6c9accdb52e4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -762,204 +762,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
762 return bf; 762 return bf;
763} 763}
764 764
765/* Assumes you've already done the endian to CPU conversion */
766static bool ath9k_rx_accept(struct ath_common *common,
767 struct ieee80211_hdr *hdr,
768 struct ieee80211_rx_status *rxs,
769 struct ath_rx_status *rx_stats,
770 bool *decrypt_error)
771{
772 struct ath_softc *sc = (struct ath_softc *) common->priv;
773 bool is_mc, is_valid_tkip, strip_mic, mic_error;
774 struct ath_hw *ah = common->ah;
775 __le16 fc;
776
777 fc = hdr->frame_control;
778
779 is_mc = !!is_multicast_ether_addr(hdr->addr1);
780 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
781 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
782 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
783 ieee80211_has_protected(fc) &&
784 !(rx_stats->rs_status &
785 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
786 ATH9K_RXERR_KEYMISS));
787
788 /*
789 * Key miss events are only relevant for pairwise keys where the
790 * descriptor does contain a valid key index. This has been observed
791 * mostly with CCMP encryption.
792 */
793 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
794 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
795 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
796
797 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
798 !ieee80211_has_morefrags(fc) &&
799 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
800 (rx_stats->rs_status & ATH9K_RXERR_MIC);
801
802 /*
803 * The rx_stats->rs_status will not be set until the end of the
804 * chained descriptors so it can be ignored if rs_more is set. The
805 * rs_more will be false at the last element of the chained
806 * descriptors.
807 */
808 if (rx_stats->rs_status != 0) {
809 u8 status_mask;
810
811 if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
812 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
813 mic_error = false;
814 }
815
816 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
817 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
818 *decrypt_error = true;
819 mic_error = false;
820 }
821
822 /*
823 * Reject error frames with the exception of
824 * decryption and MIC failures. For monitor mode,
825 * we also ignore the CRC error.
826 */
827 status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
828 ATH9K_RXERR_KEYMISS;
829
830 if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
831 status_mask |= ATH9K_RXERR_CRC;
832
833 if (rx_stats->rs_status & ~status_mask)
834 return false;
835 }
836
837 /*
838 * For unicast frames the MIC error bit can have false positives,
839 * so all MIC error reports need to be validated in software.
840 * False negatives are not common, so skip software verification
841 * if the hardware considers the MIC valid.
842 */
843 if (strip_mic)
844 rxs->flag |= RX_FLAG_MMIC_STRIPPED;
845 else if (is_mc && mic_error)
846 rxs->flag |= RX_FLAG_MMIC_ERROR;
847
848 return true;
849}
850
851static int ath9k_process_rate(struct ath_common *common,
852 struct ieee80211_hw *hw,
853 struct ath_rx_status *rx_stats,
854 struct ieee80211_rx_status *rxs)
855{
856 struct ieee80211_supported_band *sband;
857 enum ieee80211_band band;
858 unsigned int i = 0;
859 struct ath_softc __maybe_unused *sc = common->priv;
860 struct ath_hw *ah = sc->sc_ah;
861
862 band = ah->curchan->chan->band;
863 sband = hw->wiphy->bands[band];
864
865 if (IS_CHAN_QUARTER_RATE(ah->curchan))
866 rxs->flag |= RX_FLAG_5MHZ;
867 else if (IS_CHAN_HALF_RATE(ah->curchan))
868 rxs->flag |= RX_FLAG_10MHZ;
869
870 if (rx_stats->rs_rate & 0x80) {
871 /* HT rate */
872 rxs->flag |= RX_FLAG_HT;
873 rxs->flag |= rx_stats->flag;
874 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
875 return 0;
876 }
877
878 for (i = 0; i < sband->n_bitrates; i++) {
879 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
880 rxs->rate_idx = i;
881 return 0;
882 }
883 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
884 rxs->flag |= RX_FLAG_SHORTPRE;
885 rxs->rate_idx = i;
886 return 0;
887 }
888 }
889
890 /*
891 * No valid hardware bitrate found -- we should not get here
892 * because hardware has already validated this frame as OK.
893 */
894 ath_dbg(common, ANY,
895 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
896 rx_stats->rs_rate);
897 RX_STAT_INC(rx_rate_err);
898 return -EINVAL;
899}
900
901static void ath9k_process_rssi(struct ath_common *common,
902 struct ieee80211_hw *hw,
903 struct ath_rx_status *rx_stats,
904 struct ieee80211_rx_status *rxs)
905{
906 struct ath_softc *sc = hw->priv;
907 struct ath_hw *ah = common->ah;
908 int last_rssi;
909 int rssi = rx_stats->rs_rssi;
910 int i, j;
911
912 /*
913 * RSSI is not available for subframes in an A-MPDU.
914 */
915 if (rx_stats->rs_moreaggr) {
916 rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
917 return;
918 }
919
920 /*
921 * Check if the RSSI for the last subframe in an A-MPDU
922 * or an unaggregated frame is valid.
923 */
924 if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
925 rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
926 return;
927 }
928
929 for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
930 s8 rssi;
931
932 if (!(ah->rxchainmask & BIT(i)))
933 continue;
934
935 rssi = rx_stats->rs_rssi_ctl[i];
936 if (rssi != ATH9K_RSSI_BAD) {
937 rxs->chains |= BIT(j);
938 rxs->chain_signal[j] = ah->noise + rssi;
939 }
940 j++;
941 }
942
943 /*
944 * Update Beacon RSSI, this is used by ANI.
945 */
946 if (rx_stats->is_mybeacon &&
947 ((ah->opmode == NL80211_IFTYPE_STATION) ||
948 (ah->opmode == NL80211_IFTYPE_ADHOC))) {
949 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
950 last_rssi = sc->last_rssi;
951
952 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
953 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
954 if (rssi < 0)
955 rssi = 0;
956
957 ah->stats.avgbrssi = rssi;
958 }
959
960 rxs->signal = ah->noise + rx_stats->rs_rssi;
961}
962
963static void ath9k_process_tsf(struct ath_rx_status *rs, 765static void ath9k_process_tsf(struct ath_rx_status *rs,
964 struct ieee80211_rx_status *rxs, 766 struct ieee80211_rx_status *rxs,
965 u64 tsf) 767 u64 tsf)
@@ -1055,7 +857,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
1055 * everything but the rate is checked here, the rate check is done 857 * everything but the rate is checked here, the rate check is done
1056 * separately to avoid doing two lookups for a rate for each frame. 858 * separately to avoid doing two lookups for a rate for each frame.
1057 */ 859 */
1058 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 860 if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
1059 return -EINVAL; 861 return -EINVAL;
1060 862
1061 if (ath_is_mybeacon(common, hdr)) { 863 if (ath_is_mybeacon(common, hdr)) {
@@ -1069,10 +871,18 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
1069 if (WARN_ON(!ah->curchan)) 871 if (WARN_ON(!ah->curchan))
1070 return -EINVAL; 872 return -EINVAL;
1071 873
1072 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 874 if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
875 /*
876 * No valid hardware bitrate found -- we should not get here
877 * because hardware has already validated this frame as OK.
878 */
879 ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
880 rx_stats->rs_rate);
881 RX_STAT_INC(rx_rate_err);
1073 return -EINVAL; 882 return -EINVAL;
883 }
1074 884
1075 ath9k_process_rssi(common, hw, rx_stats, rx_status); 885 ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
1076 886
1077 rx_status->band = ah->curchan->chan->band; 887 rx_status->band = ah->curchan->chan->band;
1078 rx_status->freq = ah->curchan->chan->center_freq; 888 rx_status->freq = ah->curchan->chan->center_freq;
@@ -1092,57 +902,6 @@ corrupt:
1092 return -EINVAL; 902 return -EINVAL;
1093} 903}
1094 904
1095static void ath9k_rx_skb_postprocess(struct ath_common *common,
1096 struct sk_buff *skb,
1097 struct ath_rx_status *rx_stats,
1098 struct ieee80211_rx_status *rxs,
1099 bool decrypt_error)
1100{
1101 struct ath_hw *ah = common->ah;
1102 struct ieee80211_hdr *hdr;
1103 int hdrlen, padpos, padsize;
1104 u8 keyix;
1105 __le16 fc;
1106
1107 /* see if any padding is done by the hw and remove it */
1108 hdr = (struct ieee80211_hdr *) skb->data;
1109 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1110 fc = hdr->frame_control;
1111 padpos = ieee80211_hdrlen(fc);
1112
1113 /* The MAC header is padded to have 32-bit boundary if the
1114 * packet payload is non-zero. The general calculation for
1115 * padsize would take into account odd header lengths:
1116 * padsize = (4 - padpos % 4) % 4; However, since only
1117 * even-length headers are used, padding can only be 0 or 2
1118 * bytes and we can optimize this a bit. In addition, we must
1119 * not try to remove padding from short control frames that do
1120 * not have payload. */
1121 padsize = padpos & 3;
1122 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
1123 memmove(skb->data + padsize, skb->data, padpos);
1124 skb_pull(skb, padsize);
1125 }
1126
1127 keyix = rx_stats->rs_keyix;
1128
1129 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1130 ieee80211_has_protected(fc)) {
1131 rxs->flag |= RX_FLAG_DECRYPTED;
1132 } else if (ieee80211_has_protected(fc)
1133 && !decrypt_error && skb->len >= hdrlen + 4) {
1134 keyix = skb->data[hdrlen + 3] >> 6;
1135
1136 if (test_bit(keyix, common->keymap))
1137 rxs->flag |= RX_FLAG_DECRYPTED;
1138 }
1139 if (ah->sw_mgmt_crypto &&
1140 (rxs->flag & RX_FLAG_DECRYPTED) &&
1141 ieee80211_is_mgmt(fc))
1142 /* Use software decrypt for management frames. */
1143 rxs->flag &= ~RX_FLAG_DECRYPTED;
1144}
1145
1146/* 905/*
1147 * Run the LNA combining algorithm only in these cases: 906 * Run the LNA combining algorithm only in these cases:
1148 * 907 *
@@ -1292,8 +1051,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1292 skb_pull(skb, ah->caps.rx_status_len); 1051 skb_pull(skb, ah->caps.rx_status_len);
1293 1052
1294 if (!rs.rs_more) 1053 if (!rs.rs_more)
1295 ath9k_rx_skb_postprocess(common, hdr_skb, &rs, 1054 ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
1296 rxs, decrypt_error); 1055 rxs, decrypt_error);
1297 1056
1298 if (rs.rs_more) { 1057 if (rs.rs_more) {
1299 RX_STAT_INC(rx_frags); 1058 RX_STAT_INC(rx_frags);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index b686a7498450..a65cfb91adca 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -108,7 +108,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
108 struct ath_tx_control txctl; 108 struct ath_tx_control txctl;
109 int r; 109 int r;
110 110
111 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) { 111 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
112 ath_err(common, 112 ath_err(common,
113 "driver is in invalid state unable to use TX99"); 113 "driver is in invalid state unable to use TX99");
114 return -EINVAL; 114 return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 1b3230fa3651..2879887f5691 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -198,7 +198,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
198 ath_cancel_work(sc); 198 ath_cancel_work(sc);
199 ath_stop_ani(sc); 199 ath_stop_ani(sc);
200 200
201 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) { 201 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
202 ath_dbg(common, ANY, "Device not present\n"); 202 ath_dbg(common, ANY, "Device not present\n");
203 ret = -EINVAL; 203 ret = -EINVAL;
204 goto fail_wow; 204 goto fail_wow;
@@ -224,7 +224,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
224 * STA. 224 * STA.
225 */ 225 */
226 226
227 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { 227 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
228 ath_dbg(common, WOW, "None of the STA vifs are associated\n"); 228 ath_dbg(common, WOW, "None of the STA vifs are associated\n");
229 ret = 1; 229 ret = 1;
230 goto fail_wow; 230 goto fail_wow;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 55897d508a76..87cbec47fb48 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1040,11 +1040,11 @@ static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
1040 int symbols, bits; 1040 int symbols, bits;
1041 int bytes = 0; 1041 int bytes = 0;
1042 1042
1043 usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1043 symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec); 1044 symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
1044 bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams; 1045 bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
1045 bits -= OFDM_PLCP_BITS; 1046 bits -= OFDM_PLCP_BITS;
1046 bytes = bits / 8; 1047 bytes = bits / 8;
1047 bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1048 if (bytes > 65532) 1048 if (bytes > 65532)
1049 bytes = 65532; 1049 bytes = 65532;
1050 1050
@@ -1076,6 +1076,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1076 struct ath_tx_info *info, int len, bool rts) 1076 struct ath_tx_info *info, int len, bool rts)
1077{ 1077{
1078 struct ath_hw *ah = sc->sc_ah; 1078 struct ath_hw *ah = sc->sc_ah;
1079 struct ath_common *common = ath9k_hw_common(ah);
1079 struct sk_buff *skb; 1080 struct sk_buff *skb;
1080 struct ieee80211_tx_info *tx_info; 1081 struct ieee80211_tx_info *tx_info;
1081 struct ieee80211_tx_rate *rates; 1082 struct ieee80211_tx_rate *rates;
@@ -1145,7 +1146,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1145 } 1146 }
1146 1147
1147 /* legacy rates */ 1148 /* legacy rates */
1148 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; 1149 rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
1149 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1150 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1150 !(rate->flags & IEEE80211_RATE_ERP_G)) 1151 !(rate->flags & IEEE80211_RATE_ERP_G))
1151 phy = WLAN_RC_PHY_CCK; 1152 phy = WLAN_RC_PHY_CCK;
@@ -1698,7 +1699,7 @@ int ath_cabq_update(struct ath_softc *sc)
1698 1699
1699 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1700 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1700 1701
1701 qi.tqi_readyTime = (cur_conf->beacon_interval * 1702 qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
1702 ATH_CABQ_READY_TIME) / 100; 1703 ATH_CABQ_READY_TIME) / 100;
1703 ath_txq_update(sc, qnum, &qi); 1704 ath_txq_update(sc, qnum, &qi);
1704 1705
@@ -1768,7 +1769,7 @@ bool ath_drain_all_txq(struct ath_softc *sc)
1768 int i; 1769 int i;
1769 u32 npend = 0; 1770 u32 npend = 0;
1770 1771
1771 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 1772 if (test_bit(ATH_OP_INVALID, &common->op_flags))
1772 return true; 1773 return true;
1773 1774
1774 ath9k_hw_abort_tx_dma(ah); 1775 ath9k_hw_abort_tx_dma(ah);
@@ -1816,11 +1817,12 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1816 */ 1817 */
1817void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1818void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1818{ 1819{
1820 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1819 struct ath_atx_ac *ac, *last_ac; 1821 struct ath_atx_ac *ac, *last_ac;
1820 struct ath_atx_tid *tid, *last_tid; 1822 struct ath_atx_tid *tid, *last_tid;
1821 bool sent = false; 1823 bool sent = false;
1822 1824
1823 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || 1825 if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
1824 list_empty(&txq->axq_acq)) 1826 list_empty(&txq->axq_acq))
1825 return; 1827 return;
1826 1828
@@ -2470,7 +2472,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2470 2472
2471 ath_txq_lock(sc, txq); 2473 ath_txq_lock(sc, txq);
2472 for (;;) { 2474 for (;;) {
2473 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 2475 if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2474 break; 2476 break;
2475 2477
2476 if (list_empty(&txq->axq_q)) { 2478 if (list_empty(&txq->axq_q)) {
@@ -2553,7 +2555,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2553 int status; 2555 int status;
2554 2556
2555 for (;;) { 2557 for (;;) {
2556 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 2558 if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2557 break; 2559 break;
2558 2560
2559 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts); 2561 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
@@ -2569,7 +2571,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2569 sc->beacon.tx_processed = true; 2571 sc->beacon.tx_processed = true;
2570 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); 2572 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2571 2573
2572 ath9k_csa_is_finished(sc); 2574 ath9k_csa_update(sc);
2573 continue; 2575 continue;
2574 } 2576 }
2575 2577
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 536bc46a2912..924135b8e575 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -572,7 +572,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
572 572
573static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len) 573static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
574{ 574{
575 struct ieee80211_bar *bar = (void *) data; 575 struct ieee80211_bar *bar = data;
576 struct carl9170_bar_list_entry *entry; 576 struct carl9170_bar_list_entry *entry;
577 unsigned int queue; 577 unsigned int queue;
578 578
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index e5e905910db4..415393dfb6fc 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -222,7 +222,7 @@ static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
222static const struct 222static const struct
223ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg) 223ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
224{ 224{
225 switch (reg->regpair->regDmnEnum) { 225 switch (reg->regpair->reg_domain) {
226 case 0x60: 226 case 0x60:
227 case 0x61: 227 case 0x61:
228 case 0x62: 228 case 0x62:
@@ -431,7 +431,7 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
431 enum nl80211_reg_initiator initiator, 431 enum nl80211_reg_initiator initiator,
432 struct ath_regulatory *reg) 432 struct ath_regulatory *reg)
433{ 433{
434 switch (reg->regpair->regDmnEnum) { 434 switch (reg->regpair->reg_domain) {
435 case 0x60: 435 case 0x60:
436 case 0x63: 436 case 0x63:
437 case 0x66: 437 case 0x66:
@@ -560,7 +560,7 @@ static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
560 printk(KERN_DEBUG "ath: EEPROM indicates we " 560 printk(KERN_DEBUG "ath: EEPROM indicates we "
561 "should expect a direct regpair map\n"); 561 "should expect a direct regpair map\n");
562 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) 562 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
563 if (regDomainPairs[i].regDmnEnum == rd) 563 if (regDomainPairs[i].reg_domain == rd)
564 return true; 564 return true;
565 } 565 }
566 printk(KERN_DEBUG 566 printk(KERN_DEBUG
@@ -617,7 +617,7 @@ ath_get_regpair(int regdmn)
617 if (regdmn == NO_ENUMRD) 617 if (regdmn == NO_ENUMRD)
618 return NULL; 618 return NULL;
619 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) { 619 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
620 if (regDomainPairs[i].regDmnEnum == regdmn) 620 if (regDomainPairs[i].reg_domain == regdmn)
621 return &regDomainPairs[i]; 621 return &regDomainPairs[i];
622 } 622 }
623 return NULL; 623 return NULL;
@@ -741,7 +741,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
741 printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n", 741 printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
742 reg->alpha2[0], reg->alpha2[1]); 742 reg->alpha2[0], reg->alpha2[1]);
743 printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n", 743 printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
744 reg->regpair->regDmnEnum); 744 reg->regpair->reg_domain);
745 745
746 return 0; 746 return 0;
747} 747}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index ee25786b4447..73f12f196f14 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
44 writel(data, wcn->mmio + addr); 44 writel(data, wcn->mmio + addr);
45} 45}
46 46
47#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
48do { \
49 if (wcn->chip_version == WCN36XX_CHIP_3680) \
50 wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
51 else \
52 wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
53} while (0) \
54
47static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data) 55static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
48{ 56{
49 *data = readl(wcn->mmio + addr); 57 *data = readl(wcn->mmio + addr);
@@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
680 688
681 /* Setting interrupt path */ 689 /* Setting interrupt path */
682 reg_data = WCN36XX_DXE_CCU_INT; 690 reg_data = WCN36XX_DXE_CCU_INT;
683 wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data); 691 wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
684 692
685 /***************************************/ 693 /***************************************/
686 /* Init descriptors for TX LOW channel */ 694 /* Init descriptors for TX LOW channel */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index c88562f85de1..35ee7e966bd2 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
28*/ 28*/
29 29
30/* DXE registers */ 30/* DXE registers */
31#define WCN36XX_DXE_MEM_BASE 0x03000000
32#define WCN36XX_DXE_MEM_REG 0x202000 31#define WCN36XX_DXE_MEM_REG 0x202000
33 32
34#define WCN36XX_DXE_CCU_INT 0xA0011 33#define WCN36XX_DXE_CCU_INT 0xA0011
35#define WCN36XX_DXE_REG_CCU_INT 0x200b10 34#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
35#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
36 36
37/* TODO This must calculated properly but not hardcoded */ 37/* TODO This must calculated properly but not hardcoded */
38#define WCN36XX_DXE_CTRL_TX_L 0x328a44 38#define WCN36XX_DXE_CTRL_TX_L 0x328a44
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 3c2ef0c32f72..a1f1127d7808 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
4384 MAX_FEATURE_SUPPORTED = 128, 4384 MAX_FEATURE_SUPPORTED = 128,
4385}; 4385};
4386 4386
4387#define WCN36XX_HAL_CAPS_SIZE 4
4388
4387struct wcn36xx_hal_feat_caps_msg { 4389struct wcn36xx_hal_feat_caps_msg {
4388 4390
4389 struct wcn36xx_hal_msg_header header; 4391 struct wcn36xx_hal_msg_header header;
4390 4392
4391 u32 feat_caps[4]; 4393 u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
4392} __packed; 4394} __packed;
4393 4395
4394/* status codes to help debug rekey failures */ 4396/* status codes to help debug rekey failures */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e64a6784079e..4ab5370ab7a6 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -17,6 +17,7 @@
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/firmware.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include "wcn36xx.h" 22#include "wcn36xx.h"
22 23
@@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
177 sta_priv->sta_index; 178 sta_priv->sta_index;
178} 179}
179 180
181static const char * const wcn36xx_caps_names[] = {
182 "MCC", /* 0 */
183 "P2P", /* 1 */
184 "DOT11AC", /* 2 */
185 "SLM_SESSIONIZATION", /* 3 */
186 "DOT11AC_OPMODE", /* 4 */
187 "SAP32STA", /* 5 */
188 "TDLS", /* 6 */
189 "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
190 "WLANACTIVE_OFFLOAD", /* 8 */
191 "BEACON_OFFLOAD", /* 9 */
192 "SCAN_OFFLOAD", /* 10 */
193 "ROAM_OFFLOAD", /* 11 */
194 "BCN_MISS_OFFLOAD", /* 12 */
195 "STA_POWERSAVE", /* 13 */
196 "STA_ADVANCED_PWRSAVE", /* 14 */
197 "AP_UAPSD", /* 15 */
198 "AP_DFS", /* 16 */
199 "BLOCKACK", /* 17 */
200 "PHY_ERR", /* 18 */
201 "BCN_FILTER", /* 19 */
202 "RTT", /* 20 */
203 "RATECTRL", /* 21 */
204 "WOW" /* 22 */
205};
206
207static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
208{
209 if (x >= ARRAY_SIZE(wcn36xx_caps_names))
210 return "UNKNOWN";
211 return wcn36xx_caps_names[x];
212}
213
214static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
215{
216 int i;
217
218 for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
219 if (get_feat_caps(wcn->fw_feat_caps, i))
220 wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
221 }
222}
223
224static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
225{
226 if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
227 wcn36xx_info("Chip is 3680\n");
228 wcn->chip_version = WCN36XX_CHIP_3680;
229 } else {
230 wcn36xx_info("Chip is 3660\n");
231 wcn->chip_version = WCN36XX_CHIP_3660;
232 }
233}
234
180static int wcn36xx_start(struct ieee80211_hw *hw) 235static int wcn36xx_start(struct ieee80211_hw *hw)
181{ 236{
182 struct wcn36xx *wcn = hw->priv; 237 struct wcn36xx *wcn = hw->priv;
@@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
223 goto out_free_smd_buf; 278 goto out_free_smd_buf;
224 } 279 }
225 280
281 if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
282 ret = wcn36xx_smd_feature_caps_exchange(wcn);
283 if (ret)
284 wcn36xx_warn("Exchange feature caps failed\n");
285 else
286 wcn36xx_feat_caps_info(wcn);
287 }
288
289 wcn36xx_detect_chip_version(wcn);
290
226 /* DMA channel initialization */ 291 /* DMA channel initialization */
227 ret = wcn36xx_dxe_init(wcn); 292 ret = wcn36xx_dxe_init(wcn);
228 if (ret) { 293 if (ret) {
@@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
232 297
233 wcn36xx_debugfs_init(wcn); 298 wcn36xx_debugfs_init(wcn);
234 299
235 if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
236 ret = wcn36xx_smd_feature_caps_exchange(wcn);
237 if (ret)
238 wcn36xx_warn("Exchange feature caps failed\n");
239 }
240 INIT_LIST_HEAD(&wcn->vif_list); 300 INIT_LIST_HEAD(&wcn->vif_list);
241 return 0; 301 return 0;
242 302
@@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
648 bss_conf->enable_beacon); 708 bss_conf->enable_beacon);
649 709
650 if (bss_conf->enable_beacon) { 710 if (bss_conf->enable_beacon) {
711 vif_priv->dtim_period = bss_conf->dtim_period;
651 vif_priv->bss_index = 0xff; 712 vif_priv->bss_index = 0xff;
652 wcn36xx_smd_config_bss(wcn, vif, NULL, 713 wcn36xx_smd_config_bss(wcn, vif, NULL,
653 vif->addr, false); 714 vif->addr, false);
@@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
992 struct wcn36xx *wcn = hw->priv; 1053 struct wcn36xx *wcn = hw->priv;
993 wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n"); 1054 wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
994 1055
1056 release_firmware(wcn->nv);
995 mutex_destroy(&wcn->hal_mutex); 1057 mutex_destroy(&wcn->hal_mutex);
996 1058
997 ieee80211_unregister_hw(hw); 1059 ieee80211_unregister_hw(hw);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 750626b0e22d..7bf0ef8a1f56 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
195static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) 195static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
196{ 196{
197 int ret = 0; 197 int ret = 0;
198 unsigned long start;
198 wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len); 199 wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
199 200
200 init_completion(&wcn->hal_rsp_compl); 201 init_completion(&wcn->hal_rsp_compl);
202 start = jiffies;
201 ret = wcn->ctrl_ops->tx(wcn->hal_buf, len); 203 ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
202 if (ret) { 204 if (ret) {
203 wcn36xx_err("HAL TX failed\n"); 205 wcn36xx_err("HAL TX failed\n");
@@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
205 } 207 }
206 if (wait_for_completion_timeout(&wcn->hal_rsp_compl, 208 if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
207 msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) { 209 msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
208 wcn36xx_err("Timeout while waiting SMD response\n"); 210 wcn36xx_err("Timeout! No SMD response in %dms\n",
211 HAL_MSG_TIMEOUT);
209 ret = -ETIME; 212 ret = -ETIME;
210 goto out; 213 goto out;
211 } 214 }
215 wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
216 jiffies_to_msecs(jiffies - start));
212out: 217out:
213 return ret; 218 return ret;
214} 219}
@@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
246 251
247int wcn36xx_smd_load_nv(struct wcn36xx *wcn) 252int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
248{ 253{
249 const struct firmware *nv;
250 struct nv_data *nv_d; 254 struct nv_data *nv_d;
251 struct wcn36xx_hal_nv_img_download_req_msg msg_body; 255 struct wcn36xx_hal_nv_img_download_req_msg msg_body;
252 int fw_bytes_left; 256 int fw_bytes_left;
253 int ret; 257 int ret;
254 u16 fm_offset = 0; 258 u16 fm_offset = 0;
255 259
256 ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev); 260 if (!wcn->nv) {
257 if (ret) { 261 ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
258 wcn36xx_err("Failed to load nv file %s: %d\n", 262 if (ret) {
259 WLAN_NV_FILE, ret); 263 wcn36xx_err("Failed to load nv file %s: %d\n",
260 goto out_free_nv; 264 WLAN_NV_FILE, ret);
265 goto out;
266 }
261 } 267 }
262 268
263 nv_d = (struct nv_data *)nv->data; 269 nv_d = (struct nv_data *)wcn->nv->data;
264 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ); 270 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
265 271
266 msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE; 272 msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
270 mutex_lock(&wcn->hal_mutex); 276 mutex_lock(&wcn->hal_mutex);
271 277
272 do { 278 do {
273 fw_bytes_left = nv->size - fm_offset - 4; 279 fw_bytes_left = wcn->nv->size - fm_offset - 4;
274 if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) { 280 if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
275 msg_body.last_fragment = 0; 281 msg_body.last_fragment = 0;
276 msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE; 282 msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
308 314
309out_unlock: 315out_unlock:
310 mutex_unlock(&wcn->hal_mutex); 316 mutex_unlock(&wcn->hal_mutex);
311out_free_nv: 317out: return ret;
312 release_firmware(nv);
313
314 return ret;
315} 318}
316 319
317static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len) 320static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
@@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
899 902
900 sta_priv->sta_index = params->sta_index; 903 sta_priv->sta_index = params->sta_index;
901 sta_priv->dpu_desc_index = params->dpu_index; 904 sta_priv->dpu_desc_index = params->dpu_index;
905 sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
902 906
903 wcn36xx_dbg(WCN36XX_DBG_HAL, 907 wcn36xx_dbg(WCN36XX_DBG_HAL,
904 "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n", 908 "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
905 params->status, params->sta_index, params->bssid_index, 909 params->status, params->sta_index, params->bssid_index,
906 params->p2p); 910 params->uc_ucast_sig, params->p2p);
907 911
908 return 0; 912 return 0;
909} 913}
@@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
1118 priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index; 1122 priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
1119 } 1123 }
1120 1124
1121 priv_vif->ucast_dpu_signature = params->ucast_dpu_signature; 1125 priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
1122 1126
1123 return 0; 1127 return 0;
1124} 1128}
@@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
1637 1641
1638 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); 1642 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
1639 if (ret) { 1643 if (ret) {
1640 wcn36xx_err("Sending hal_exit_bmps failed\n"); 1644 wcn36xx_err("Sending hal_keep_alive failed\n");
1641 goto out; 1645 goto out;
1642 } 1646 }
1643 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); 1647 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
1644 if (ret) { 1648 if (ret) {
1645 wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret); 1649 wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
1646 goto out; 1650 goto out;
1647 } 1651 }
1648out: 1652out:
@@ -1682,8 +1686,7 @@ out:
1682 return ret; 1686 return ret;
1683} 1687}
1684 1688
1685static inline void set_feat_caps(u32 *bitmap, 1689void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
1686 enum place_holder_in_cap_bitmap cap)
1687{ 1690{
1688 int arr_idx, bit_idx; 1691 int arr_idx, bit_idx;
1689 1692
@@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
1697 bitmap[arr_idx] |= (1 << bit_idx); 1700 bitmap[arr_idx] |= (1 << bit_idx);
1698} 1701}
1699 1702
1700static inline int get_feat_caps(u32 *bitmap, 1703int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
1701 enum place_holder_in_cap_bitmap cap)
1702{ 1704{
1703 int arr_idx, bit_idx; 1705 int arr_idx, bit_idx;
1704 int ret = 0; 1706 int ret = 0;
@@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
1714 return ret; 1716 return ret;
1715} 1717}
1716 1718
1717static inline void clear_feat_caps(u32 *bitmap, 1719void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
1718 enum place_holder_in_cap_bitmap cap)
1719{ 1720{
1720 int arr_idx, bit_idx; 1721 int arr_idx, bit_idx;
1721 1722
@@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
1731 1732
1732int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn) 1733int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
1733{ 1734{
1734 struct wcn36xx_hal_feat_caps_msg msg_body; 1735 struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
1735 int ret = 0; 1736 int ret = 0, i;
1736 1737
1737 mutex_lock(&wcn->hal_mutex); 1738 mutex_lock(&wcn->hal_mutex);
1738 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ); 1739 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
1746 wcn36xx_err("Sending hal_feature_caps_exchange failed\n"); 1747 wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
1747 goto out; 1748 goto out;
1748 } 1749 }
1749 ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); 1750 if (wcn->hal_rsp_len != sizeof(*rsp)) {
1750 if (ret) { 1751 wcn36xx_err("Invalid hal_feature_caps_exchange response");
1751 wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
1752 ret);
1753 goto out; 1752 goto out;
1754 } 1753 }
1754
1755 rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
1756
1757 for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
1758 wcn->fw_feat_caps[i] = rsp->feat_caps[i];
1755out: 1759out:
1756 mutex_unlock(&wcn->hal_mutex); 1760 mutex_unlock(&wcn->hal_mutex);
1757 return ret; 1761 return ret;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index e7c39019c6f1..008d03423dbf 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
24 24
25#define WCN36XX_HAL_BUF_SIZE 4096 25#define WCN36XX_HAL_BUF_SIZE 4096
26 26
27#define HAL_MSG_TIMEOUT 200 27#define HAL_MSG_TIMEOUT 500
28#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400 28#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
29#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200 29#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
30/* The PNO version info be contained in the rsp msg */ 30/* The PNO version info be contained in the rsp msg */
@@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
112int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2, 112int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
113 u32 arg3, u32 arg4, u32 arg5); 113 u32 arg3, u32 arg4, u32 arg5);
114int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn); 114int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
115void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
116int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
117void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
115 118
116int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn, 119int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
117 struct ieee80211_sta *sta, 120 struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index b2b60e30caaf..32bb26a0db2a 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
57 RX_FLAG_MMIC_STRIPPED | 57 RX_FLAG_MMIC_STRIPPED |
58 RX_FLAG_DECRYPTED; 58 RX_FLAG_DECRYPTED;
59 59
60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n", 60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
61 status.flag, status.vendor_radiotap_len);
62 61
63 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 62 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
64 63
@@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
132 struct ieee80211_vif, 131 struct ieee80211_vif,
133 drv_priv); 132 drv_priv);
134 133
134 bd->dpu_sign = sta_priv->ucast_dpu_sign;
135 if (vif->type == NL80211_IFTYPE_STATION) { 135 if (vif->type == NL80211_IFTYPE_STATION) {
136 bd->sta_index = sta_priv->bss_sta_index; 136 bd->sta_index = sta_priv->bss_sta_index;
137 bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index; 137 bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
145 __vif_priv = get_vif_by_addr(wcn, hdr->addr2); 145 __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
146 bd->sta_index = __vif_priv->self_sta_index; 146 bd->sta_index = __vif_priv->self_sta_index;
147 bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index; 147 bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
148 bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
148 } 149 }
149 150
150 bd->dpu_sign = __vif_priv->ucast_dpu_signature;
151
152 if (ieee80211_is_nullfunc(hdr->frame_control) || 151 if (ieee80211_is_nullfunc(hdr->frame_control) ||
153 (sta_priv && !sta_priv->is_data_encrypted)) 152 (sta_priv && !sta_priv->is_data_encrypted))
154 bd->dpu_ne = 1; 153 bd->dpu_ne = 1;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 8fa5cbace5ab..f0fb81dfd17b 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_vif {
125 enum wcn36xx_power_state pw_state; 125 enum wcn36xx_power_state pw_state;
126 126
127 u8 bss_index; 127 u8 bss_index;
128 u8 ucast_dpu_signature;
129 /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */ 128 /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
130 u8 self_sta_index; 129 u8 self_sta_index;
131 u8 self_dpu_desc_index; 130 u8 self_dpu_desc_index;
131 u8 self_ucast_dpu_sign;
132}; 132};
133 133
134/** 134/**
@@ -159,6 +159,7 @@ struct wcn36xx_sta {
159 u16 tid; 159 u16 tid;
160 u8 sta_index; 160 u8 sta_index;
161 u8 dpu_desc_index; 161 u8 dpu_desc_index;
162 u8 ucast_dpu_sign;
162 u8 bss_sta_index; 163 u8 bss_sta_index;
163 u8 bss_dpu_desc_index; 164 u8 bss_dpu_desc_index;
164 bool is_data_encrypted; 165 bool is_data_encrypted;
@@ -171,10 +172,14 @@ struct wcn36xx {
171 struct device *dev; 172 struct device *dev;
172 struct list_head vif_list; 173 struct list_head vif_list;
173 174
175 const struct firmware *nv;
176
174 u8 fw_revision; 177 u8 fw_revision;
175 u8 fw_version; 178 u8 fw_version;
176 u8 fw_minor; 179 u8 fw_minor;
177 u8 fw_major; 180 u8 fw_major;
181 u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
182 u32 chip_version;
178 183
179 /* extra byte for the NULL termination */ 184 /* extra byte for the NULL termination */
180 u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1]; 185 u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -222,6 +227,9 @@ struct wcn36xx {
222 227
223}; 228};
224 229
230#define WCN36XX_CHIP_3660 0
231#define WCN36XX_CHIP_3680 1
232
225static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn, 233static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
226 u8 major, 234 u8 major,
227 u8 minor, 235 u8 minor,
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 990dd42ae79e..c7a3465fd02a 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-y += wmi.o
9wil6210-y += interrupt.o 9wil6210-y += interrupt.o
10wil6210-y += txrx.o 10wil6210-y += txrx.o
11wil6210-y += debug.o 11wil6210-y += debug.o
12wil6210-y += rx_reorder.o
12wil6210-$(CONFIG_WIL6210_TRACING) += trace.o 13wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
13 14
14# for tracing framework to find trace.h 15# for tracing framework to find trace.h
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5b340769d5bb..4806a49cb61b 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -104,41 +104,125 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type)
104 return -EOPNOTSUPP; 104 return -EOPNOTSUPP;
105} 105}
106 106
107static int wil_cfg80211_get_station(struct wiphy *wiphy, 107static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
108 struct net_device *ndev, 108 struct station_info *sinfo)
109 u8 *mac, struct station_info *sinfo)
110{ 109{
111 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
112 int rc;
113 struct wmi_notify_req_cmd cmd = { 110 struct wmi_notify_req_cmd cmd = {
114 .cid = 0, 111 .cid = cid,
115 .interval_usec = 0, 112 .interval_usec = 0,
116 }; 113 };
114 struct {
115 struct wil6210_mbox_hdr_wmi wmi;
116 struct wmi_notify_req_done_event evt;
117 } __packed reply;
118 struct wil_net_stats *stats = &wil->sta[cid].stats;
119 int rc;
117 120
118 if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
119 return -ENOENT;
120
121 /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
122 rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), 121 rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
123 WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20); 122 WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
124 if (rc) 123 if (rc)
125 return rc; 124 return rc;
126 125
126 wil_dbg_wmi(wil, "Link status for CID %d: {\n"
127 " MCS %d TSF 0x%016llx\n"
128 " BF status 0x%08x SNR 0x%08x SQI %d%%\n"
129 " Tx Tpt %d goodput %d Rx goodput %d\n"
130 " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
131 cid, le16_to_cpu(reply.evt.bf_mcs),
132 le64_to_cpu(reply.evt.tsf), reply.evt.status,
133 le32_to_cpu(reply.evt.snr_val),
134 reply.evt.sqi,
135 le32_to_cpu(reply.evt.tx_tpt),
136 le32_to_cpu(reply.evt.tx_goodput),
137 le32_to_cpu(reply.evt.rx_goodput),
138 le16_to_cpu(reply.evt.my_rx_sector),
139 le16_to_cpu(reply.evt.my_tx_sector),
140 le16_to_cpu(reply.evt.other_rx_sector),
141 le16_to_cpu(reply.evt.other_tx_sector));
142
127 sinfo->generation = wil->sinfo_gen; 143 sinfo->generation = wil->sinfo_gen;
128 144
129 sinfo->filled |= STATION_INFO_TX_BITRATE; 145 sinfo->filled = STATION_INFO_RX_BYTES |
146 STATION_INFO_TX_BYTES |
147 STATION_INFO_RX_PACKETS |
148 STATION_INFO_TX_PACKETS |
149 STATION_INFO_RX_BITRATE |
150 STATION_INFO_TX_BITRATE |
151 STATION_INFO_RX_DROP_MISC |
152 STATION_INFO_TX_FAILED;
153
130 sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; 154 sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
131 sinfo->txrate.mcs = wil->stats.bf_mcs; 155 sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
132 sinfo->filled |= STATION_INFO_RX_BITRATE;
133 sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; 156 sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
134 sinfo->rxrate.mcs = wil->stats.last_mcs_rx; 157 sinfo->rxrate.mcs = stats->last_mcs_rx;
158 sinfo->rx_bytes = stats->rx_bytes;
159 sinfo->rx_packets = stats->rx_packets;
160 sinfo->rx_dropped_misc = stats->rx_dropped;
161 sinfo->tx_bytes = stats->tx_bytes;
162 sinfo->tx_packets = stats->tx_packets;
163 sinfo->tx_failed = stats->tx_errors;
135 164
136 if (test_bit(wil_status_fwconnected, &wil->status)) { 165 if (test_bit(wil_status_fwconnected, &wil->status)) {
137 sinfo->filled |= STATION_INFO_SIGNAL; 166 sinfo->filled |= STATION_INFO_SIGNAL;
138 sinfo->signal = 12; /* TODO: provide real value */ 167 sinfo->signal = reply.evt.sqi;
139 } 168 }
140 169
141 return 0; 170 return rc;
171}
172
173static int wil_cfg80211_get_station(struct wiphy *wiphy,
174 struct net_device *ndev,
175 u8 *mac, struct station_info *sinfo)
176{
177 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
178 int rc;
179
180 int cid = wil_find_cid(wil, mac);
181
182 wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
183 if (cid < 0)
184 return cid;
185
186 rc = wil_cid_fill_sinfo(wil, cid, sinfo);
187
188 return rc;
189}
190
191/*
192 * Find @idx-th active STA for station dump.
193 */
194static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx)
195{
196 int i;
197
198 for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
199 if (wil->sta[i].status == wil_sta_unused)
200 continue;
201 if (idx == 0)
202 return i;
203 idx--;
204 }
205
206 return -ENOENT;
207}
208
209static int wil_cfg80211_dump_station(struct wiphy *wiphy,
210 struct net_device *dev, int idx,
211 u8 *mac, struct station_info *sinfo)
212{
213 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
214 int rc;
215 int cid = wil_find_cid_by_idx(wil, idx);
216
217 if (cid < 0)
218 return -ENOENT;
219
220 memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
221 wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
222
223 rc = wil_cid_fill_sinfo(wil, cid, sinfo);
224
225 return rc;
142} 226}
143 227
144static int wil_cfg80211_change_iface(struct wiphy *wiphy, 228static int wil_cfg80211_change_iface(struct wiphy *wiphy,
@@ -181,6 +265,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
181 u16 chnl[4]; 265 u16 chnl[4];
182 } __packed cmd; 266 } __packed cmd;
183 uint i, n; 267 uint i, n;
268 int rc;
184 269
185 if (wil->scan_request) { 270 if (wil->scan_request) {
186 wil_err(wil, "Already scanning\n"); 271 wil_err(wil, "Already scanning\n");
@@ -198,7 +283,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
198 283
199 /* FW don't support scan after connection attempt */ 284 /* FW don't support scan after connection attempt */
200 if (test_bit(wil_status_dontscan, &wil->status)) { 285 if (test_bit(wil_status_dontscan, &wil->status)) {
201 wil_err(wil, "Scan after connect attempt not supported\n"); 286 wil_err(wil, "Can't scan now\n");
202 return -EBUSY; 287 return -EBUSY;
203 } 288 }
204 289
@@ -221,8 +306,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
221 request->channels[i]->center_freq); 306 request->channels[i]->center_freq);
222 } 307 }
223 308
224 return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + 309 rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
225 cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); 310 cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
311
312 if (rc)
313 wil->scan_request = NULL;
314
315 return rc;
226} 316}
227 317
228static int wil_cfg80211_connect(struct wiphy *wiphy, 318static int wil_cfg80211_connect(struct wiphy *wiphy,
@@ -237,6 +327,10 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
237 int ch; 327 int ch;
238 int rc = 0; 328 int rc = 0;
239 329
330 if (test_bit(wil_status_fwconnecting, &wil->status) ||
331 test_bit(wil_status_fwconnected, &wil->status))
332 return -EALREADY;
333
240 bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, 334 bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
241 sme->ssid, sme->ssid_len, 335 sme->ssid, sme->ssid_len,
242 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 336 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -318,10 +412,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
318 412
319 memcpy(conn.bssid, bss->bssid, ETH_ALEN); 413 memcpy(conn.bssid, bss->bssid, ETH_ALEN);
320 memcpy(conn.dst_mac, bss->bssid, ETH_ALEN); 414 memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
321 /* 415
322 * FW don't support scan after connection attempt
323 */
324 set_bit(wil_status_dontscan, &wil->status);
325 set_bit(wil_status_fwconnecting, &wil->status); 416 set_bit(wil_status_fwconnecting, &wil->status);
326 417
327 rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); 418 rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
@@ -330,7 +421,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
330 mod_timer(&wil->connect_timer, 421 mod_timer(&wil->connect_timer,
331 jiffies + msecs_to_jiffies(2000)); 422 jiffies + msecs_to_jiffies(2000));
332 } else { 423 } else {
333 clear_bit(wil_status_dontscan, &wil->status);
334 clear_bit(wil_status_fwconnecting, &wil->status); 424 clear_bit(wil_status_fwconnecting, &wil->status);
335 } 425 }
336 426
@@ -352,6 +442,40 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
352 return rc; 442 return rc;
353} 443}
354 444
445static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
446 struct wireless_dev *wdev,
447 struct cfg80211_mgmt_tx_params *params,
448 u64 *cookie)
449{
450 const u8 *buf = params->buf;
451 size_t len = params->len;
452 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
453 int rc;
454 struct ieee80211_mgmt *mgmt_frame = (void *)buf;
455 struct wmi_sw_tx_req_cmd *cmd;
456 struct {
457 struct wil6210_mbox_hdr_wmi wmi;
458 struct wmi_sw_tx_complete_event evt;
459 } __packed evt;
460
461 cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
462 if (!cmd)
463 return -ENOMEM;
464
465 memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
466 cmd->len = cpu_to_le16(len);
467 memcpy(cmd->payload, buf, len);
468
469 rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
470 WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
471 if (rc == 0)
472 rc = evt.evt.status;
473
474 kfree(cmd);
475
476 return rc;
477}
478
355static int wil_cfg80211_set_channel(struct wiphy *wiphy, 479static int wil_cfg80211_set_channel(struct wiphy *wiphy,
356 struct cfg80211_chan_def *chandef) 480 struct cfg80211_chan_def *chandef)
357{ 481{
@@ -402,6 +526,41 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
402 return 0; 526 return 0;
403} 527}
404 528
529static int wil_remain_on_channel(struct wiphy *wiphy,
530 struct wireless_dev *wdev,
531 struct ieee80211_channel *chan,
532 unsigned int duration,
533 u64 *cookie)
534{
535 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
536 int rc;
537
538 /* TODO: handle duration */
539 wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
540
541 rc = wmi_set_channel(wil, chan->hw_value);
542 if (rc)
543 return rc;
544
545 rc = wmi_rxon(wil, true);
546
547 return rc;
548}
549
550static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
551 struct wireless_dev *wdev,
552 u64 cookie)
553{
554 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
555 int rc;
556
557 wil_info(wil, "%s()\n", __func__);
558
559 rc = wmi_rxon(wil, false);
560
561 return rc;
562}
563
405static int wil_fix_bcon(struct wil6210_priv *wil, 564static int wil_fix_bcon(struct wil6210_priv *wil,
406 struct cfg80211_beacon_data *bcon) 565 struct cfg80211_beacon_data *bcon)
407{ 566{
@@ -450,18 +609,20 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
450 if (wil_fix_bcon(wil, bcon)) 609 if (wil_fix_bcon(wil, bcon))
451 wil_dbg_misc(wil, "Fixed bcon\n"); 610 wil_dbg_misc(wil, "Fixed bcon\n");
452 611
612 mutex_lock(&wil->mutex);
613
453 rc = wil_reset(wil); 614 rc = wil_reset(wil);
454 if (rc) 615 if (rc)
455 return rc; 616 goto out;
456 617
457 /* Rx VRING. */ 618 /* Rx VRING. */
458 rc = wil_rx_init(wil); 619 rc = wil_rx_init(wil);
459 if (rc) 620 if (rc)
460 return rc; 621 goto out;
461 622
462 rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); 623 rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
463 if (rc) 624 if (rc)
464 return rc; 625 goto out;
465 626
466 /* MAC address - pre-requisite for other commands */ 627 /* MAC address - pre-requisite for other commands */
467 wmi_set_mac_address(wil, ndev->dev_addr); 628 wmi_set_mac_address(wil, ndev->dev_addr);
@@ -485,11 +646,13 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
485 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, 646 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
486 channel->hw_value); 647 channel->hw_value);
487 if (rc) 648 if (rc)
488 return rc; 649 goto out;
489 650
490 651
491 netif_carrier_on(ndev); 652 netif_carrier_on(ndev);
492 653
654out:
655 mutex_unlock(&wil->mutex);
493 return rc; 656 return rc;
494} 657}
495 658
@@ -499,17 +662,36 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
499 int rc = 0; 662 int rc = 0;
500 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 663 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
501 664
665 mutex_lock(&wil->mutex);
666
502 rc = wmi_pcp_stop(wil); 667 rc = wmi_pcp_stop(wil);
503 668
669 mutex_unlock(&wil->mutex);
504 return rc; 670 return rc;
505} 671}
506 672
673static int wil_cfg80211_del_station(struct wiphy *wiphy,
674 struct net_device *dev, u8 *mac)
675{
676 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
677
678 mutex_lock(&wil->mutex);
679 wil6210_disconnect(wil, mac);
680 mutex_unlock(&wil->mutex);
681
682 return 0;
683}
684
507static struct cfg80211_ops wil_cfg80211_ops = { 685static struct cfg80211_ops wil_cfg80211_ops = {
508 .scan = wil_cfg80211_scan, 686 .scan = wil_cfg80211_scan,
509 .connect = wil_cfg80211_connect, 687 .connect = wil_cfg80211_connect,
510 .disconnect = wil_cfg80211_disconnect, 688 .disconnect = wil_cfg80211_disconnect,
511 .change_virtual_intf = wil_cfg80211_change_iface, 689 .change_virtual_intf = wil_cfg80211_change_iface,
512 .get_station = wil_cfg80211_get_station, 690 .get_station = wil_cfg80211_get_station,
691 .dump_station = wil_cfg80211_dump_station,
692 .remain_on_channel = wil_remain_on_channel,
693 .cancel_remain_on_channel = wil_cancel_remain_on_channel,
694 .mgmt_tx = wil_cfg80211_mgmt_tx,
513 .set_monitor_channel = wil_cfg80211_set_channel, 695 .set_monitor_channel = wil_cfg80211_set_channel,
514 .add_key = wil_cfg80211_add_key, 696 .add_key = wil_cfg80211_add_key,
515 .del_key = wil_cfg80211_del_key, 697 .del_key = wil_cfg80211_del_key,
@@ -517,6 +699,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
517 /* AP mode */ 699 /* AP mode */
518 .start_ap = wil_cfg80211_start_ap, 700 .start_ap = wil_cfg80211_start_ap,
519 .stop_ap = wil_cfg80211_stop_ap, 701 .stop_ap = wil_cfg80211_stop_ap,
702 .del_station = wil_cfg80211_del_station,
520}; 703};
521 704
522static void wil_wiphy_init(struct wiphy *wiphy) 705static void wil_wiphy_init(struct wiphy *wiphy)
@@ -542,7 +725,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
542 wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; 725 wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
543 726
544 /* TODO: figure this out */ 727 /* TODO: figure this out */
545 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 728 wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
546 729
547 wiphy->cipher_suites = wil_cipher_suites; 730 wiphy->cipher_suites = wil_cipher_suites;
548 wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites); 731 wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 1caa31992a7e..ecdabe4adec3 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -26,9 +26,11 @@
26/* Nasty hack. Better have per device instances */ 26/* Nasty hack. Better have per device instances */
27static u32 mem_addr; 27static u32 mem_addr;
28static u32 dbg_txdesc_index; 28static u32 dbg_txdesc_index;
29static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
29 30
30static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, 31static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
31 const char *name, struct vring *vring) 32 const char *name, struct vring *vring,
33 char _s, char _h)
32{ 34{
33 void __iomem *x = wmi_addr(wil, vring->hwtail); 35 void __iomem *x = wmi_addr(wil, vring->hwtail);
34 36
@@ -50,8 +52,8 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
50 volatile struct vring_tx_desc *d = &vring->va[i].tx; 52 volatile struct vring_tx_desc *d = &vring->va[i].tx;
51 if ((i % 64) == 0 && (i != 0)) 53 if ((i % 64) == 0 && (i != 0))
52 seq_printf(s, "\n"); 54 seq_printf(s, "\n");
53 seq_printf(s, "%s", (d->dma.status & BIT(0)) ? 55 seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
54 "S" : (vring->ctx[i].skb ? "H" : "h")); 56 _s : (vring->ctx[i].skb ? _h : 'h'));
55 } 57 }
56 seq_printf(s, "\n"); 58 seq_printf(s, "\n");
57 } 59 }
@@ -63,14 +65,19 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
63 uint i; 65 uint i;
64 struct wil6210_priv *wil = s->private; 66 struct wil6210_priv *wil = s->private;
65 67
66 wil_print_vring(s, wil, "rx", &wil->vring_rx); 68 wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
67 69
68 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { 70 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
69 struct vring *vring = &(wil->vring_tx[i]); 71 struct vring *vring = &(wil->vring_tx[i]);
70 if (vring->va) { 72 if (vring->va) {
73 int cid = wil->vring2cid_tid[i][0];
74 int tid = wil->vring2cid_tid[i][1];
71 char name[10]; 75 char name[10];
72 snprintf(name, sizeof(name), "tx_%2d", i); 76 snprintf(name, sizeof(name), "tx_%2d", i);
73 wil_print_vring(s, wil, name, vring); 77
78 seq_printf(s, "\n%pM CID %d TID %d\n",
79 wil->sta[cid].addr, cid, tid);
80 wil_print_vring(s, wil, name, vring, '_', 'H');
74 } 81 }
75 } 82 }
76 83
@@ -390,25 +397,78 @@ static const struct file_operations fops_reset = {
390 .write = wil_write_file_reset, 397 .write = wil_write_file_reset,
391 .open = simple_open, 398 .open = simple_open,
392}; 399};
393/*---------Tx descriptor------------*/
394 400
401static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
402 const char *prefix)
403{
404 char printbuf[16 * 3 + 2];
405 int i = 0;
406 while (i < len) {
407 int l = min(len - i, 16);
408 hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
409 sizeof(printbuf), false);
410 seq_printf(s, "%s%s\n", prefix, printbuf);
411 i += l;
412 }
413}
414
415static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
416{
417 int i = 0;
418 int len = skb_headlen(skb);
419 void *p = skb->data;
420 int nr_frags = skb_shinfo(skb)->nr_frags;
421
422 seq_printf(s, " len = %d\n", len);
423 wil_seq_hexdump(s, p, len, " : ");
424
425 if (nr_frags) {
426 seq_printf(s, " nr_frags = %d\n", nr_frags);
427 for (i = 0; i < nr_frags; i++) {
428 const struct skb_frag_struct *frag =
429 &skb_shinfo(skb)->frags[i];
430
431 len = skb_frag_size(frag);
432 p = skb_frag_address_safe(frag);
433 seq_printf(s, " [%2d] : len = %d\n", i, len);
434 wil_seq_hexdump(s, p, len, " : ");
435 }
436 }
437}
438
439/*---------Tx/Rx descriptor------------*/
395static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) 440static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
396{ 441{
397 struct wil6210_priv *wil = s->private; 442 struct wil6210_priv *wil = s->private;
398 struct vring *vring = &(wil->vring_tx[0]); 443 struct vring *vring;
444 bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
445 if (tx)
446 vring = &(wil->vring_tx[dbg_vring_index]);
447 else
448 vring = &wil->vring_rx;
399 449
400 if (!vring->va) { 450 if (!vring->va) {
401 seq_printf(s, "No Tx VRING\n"); 451 if (tx)
452 seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
453 else
454 seq_puts(s, "No Rx VRING\n");
402 return 0; 455 return 0;
403 } 456 }
404 457
405 if (dbg_txdesc_index < vring->size) { 458 if (dbg_txdesc_index < vring->size) {
459 /* use struct vring_tx_desc for Rx as well,
460 * only field used, .dma.length, is the same
461 */
406 volatile struct vring_tx_desc *d = 462 volatile struct vring_tx_desc *d =
407 &(vring->va[dbg_txdesc_index].tx); 463 &(vring->va[dbg_txdesc_index].tx);
408 volatile u32 *u = (volatile u32 *)d; 464 volatile u32 *u = (volatile u32 *)d;
409 struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb; 465 struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
410 466
411 seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index); 467 if (tx)
468 seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
469 dbg_txdesc_index);
470 else
471 seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
412 seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n", 472 seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
413 u[0], u[1], u[2], u[3]); 473 u[0], u[1], u[2], u[3]);
414 seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n", 474 seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -416,31 +476,19 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
416 seq_printf(s, " SKB = %p\n", skb); 476 seq_printf(s, " SKB = %p\n", skb);
417 477
418 if (skb) { 478 if (skb) {
419 char printbuf[16 * 3 + 2]; 479 skb_get(skb);
420 int i = 0; 480 wil_seq_print_skb(s, skb);
421 int len = le16_to_cpu(d->dma.length); 481 kfree_skb(skb);
422 void *p = skb->data;
423
424 if (len != skb_headlen(skb)) {
425 seq_printf(s, "!!! len: desc = %d skb = %d\n",
426 len, skb_headlen(skb));
427 len = min_t(int, len, skb_headlen(skb));
428 }
429
430 seq_printf(s, " len = %d\n", len);
431
432 while (i < len) {
433 int l = min(len - i, 16);
434 hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
435 sizeof(printbuf), false);
436 seq_printf(s, " : %s\n", printbuf);
437 i += l;
438 }
439 } 482 }
440 seq_printf(s, "}\n"); 483 seq_printf(s, "}\n");
441 } else { 484 } else {
442 seq_printf(s, "TxDesc index (%d) >= size (%d)\n", 485 if (tx)
443 dbg_txdesc_index, vring->size); 486 seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
487 dbg_vring_index, dbg_txdesc_index,
488 vring->size);
489 else
490 seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
491 dbg_txdesc_index, vring->size);
444 } 492 }
445 493
446 return 0; 494 return 0;
@@ -570,6 +618,69 @@ static const struct file_operations fops_temp = {
570 .llseek = seq_lseek, 618 .llseek = seq_lseek,
571}; 619};
572 620
621/*---------Station matrix------------*/
622static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
623{
624 int i;
625 u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
626 seq_printf(s, "0x%03x [", r->head_seq_num);
627 for (i = 0; i < r->buf_size; i++) {
628 if (i == index)
629 seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
630 else
631 seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
632 }
633 seq_puts(s, "]\n");
634}
635
636static int wil_sta_debugfs_show(struct seq_file *s, void *data)
637{
638 struct wil6210_priv *wil = s->private;
639 int i, tid;
640
641 for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
642 struct wil_sta_info *p = &wil->sta[i];
643 char *status = "unknown";
644 switch (p->status) {
645 case wil_sta_unused:
646 status = "unused ";
647 break;
648 case wil_sta_conn_pending:
649 status = "pending ";
650 break;
651 case wil_sta_connected:
652 status = "connected";
653 break;
654 }
655 seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
656 (p->data_port_open ? " data_port_open" : ""));
657
658 if (p->status == wil_sta_connected) {
659 for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
660 struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
661 if (r) {
662 seq_printf(s, "[%2d] ", tid);
663 wil_print_rxtid(s, r);
664 }
665 }
666 }
667 }
668
669 return 0;
670}
671
672static int wil_sta_seq_open(struct inode *inode, struct file *file)
673{
674 return single_open(file, wil_sta_debugfs_show, inode->i_private);
675}
676
677static const struct file_operations fops_sta = {
678 .open = wil_sta_seq_open,
679 .release = single_release,
680 .read = seq_read,
681 .llseek = seq_lseek,
682};
683
573/*----------------*/ 684/*----------------*/
574int wil6210_debugfs_init(struct wil6210_priv *wil) 685int wil6210_debugfs_init(struct wil6210_priv *wil)
575{ 686{
@@ -581,9 +692,13 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
581 692
582 debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox); 693 debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
583 debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring); 694 debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
584 debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc); 695 debugfs_create_file("stations", S_IRUGO, dbg, wil, &fops_sta);
585 debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg, 696 debugfs_create_file("desc", S_IRUGO, dbg, wil, &fops_txdesc);
697 debugfs_create_u32("desc_index", S_IRUGO | S_IWUSR, dbg,
586 &dbg_txdesc_index); 698 &dbg_txdesc_index);
699 debugfs_create_u32("vring_index", S_IRUGO | S_IWUSR, dbg,
700 &dbg_vring_index);
701
587 debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf); 702 debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
588 debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid); 703 debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
589 debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg, 704 debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 10919f95a83c..5824cd41e4ba 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -195,8 +195,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
195 if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) { 195 if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
196 wil_dbg_irq(wil, "RX done\n"); 196 wil_dbg_irq(wil, "RX done\n");
197 isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; 197 isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
198 wil_dbg_txrx(wil, "NAPI schedule\n"); 198 if (test_bit(wil_status_reset_done, &wil->status)) {
199 napi_schedule(&wil->napi_rx); 199 wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
200 napi_schedule(&wil->napi_rx);
201 } else {
202 wil_err(wil, "Got Rx interrupt while in reset\n");
203 }
200 } 204 }
201 205
202 if (isr) 206 if (isr)
@@ -226,10 +230,15 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
226 230
227 if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { 231 if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
228 wil_dbg_irq(wil, "TX done\n"); 232 wil_dbg_irq(wil, "TX done\n");
229 napi_schedule(&wil->napi_tx);
230 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; 233 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
231 /* clear also all VRING interrupts */ 234 /* clear also all VRING interrupts */
232 isr &= ~(BIT(25) - 1UL); 235 isr &= ~(BIT(25) - 1UL);
236 if (test_bit(wil_status_reset_done, &wil->status)) {
237 wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
238 napi_schedule(&wil->napi_tx);
239 } else {
240 wil_err(wil, "Got Tx interrupt while in reset\n");
241 }
233 } 242 }
234 243
235 if (isr) 244 if (isr)
@@ -319,6 +328,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
319 if (isr & ISR_MISC_FW_ERROR) { 328 if (isr & ISR_MISC_FW_ERROR) {
320 wil_notify_fw_error(wil); 329 wil_notify_fw_error(wil);
321 isr &= ~ISR_MISC_FW_ERROR; 330 isr &= ~ISR_MISC_FW_ERROR;
331 wil_fw_error_recovery(wil);
322 } 332 }
323 333
324 if (isr & ISR_MISC_MBOX_EVT) { 334 if (isr & ISR_MISC_MBOX_EVT) {
@@ -493,6 +503,23 @@ free0:
493 503
494 return rc; 504 return rc;
495} 505}
506/* can't use wil_ioread32_and_clear because ICC value is not ser yet */
507static inline void wil_clear32(void __iomem *addr)
508{
509 u32 x = ioread32(addr);
510
511 iowrite32(x, addr);
512}
513
514void wil6210_clear_irq(struct wil6210_priv *wil)
515{
516 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
517 offsetof(struct RGF_ICR, ICR));
518 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
519 offsetof(struct RGF_ICR, ICR));
520 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
521 offsetof(struct RGF_ICR, ICR));
522}
496 523
497int wil6210_init_irq(struct wil6210_priv *wil, int irq) 524int wil6210_init_irq(struct wil6210_priv *wil, int irq)
498{ 525{
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index fd30cddd5882..95f4efe9ef37 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -16,8 +16,14 @@
16 16
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/if_arp.h> 18#include <linux/if_arp.h>
19#include <linux/etherdevice.h>
19 20
20#include "wil6210.h" 21#include "wil6210.h"
22#include "txrx.h"
23
24static bool no_fw_recovery;
25module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
26MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery");
21 27
22/* 28/*
23 * Due to a hardware issue, 29 * Due to a hardware issue,
@@ -52,29 +58,74 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
52 __raw_writel(*s++, d++); 58 __raw_writel(*s++, d++);
53} 59}
54 60
55static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) 61static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
56{ 62{
57 uint i; 63 uint i;
58 struct net_device *ndev = wil_to_ndev(wil); 64 struct wil_sta_info *sta = &wil->sta[cid];
59 65
60 wil_dbg_misc(wil, "%s()\n", __func__); 66 sta->data_port_open = false;
67 if (sta->status != wil_sta_unused) {
68 wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
69 sta->status = wil_sta_unused;
70 }
61 71
62 wil_link_off(wil); 72 for (i = 0; i < WIL_STA_TID_NUM; i++) {
63 if (test_bit(wil_status_fwconnected, &wil->status)) { 73 struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
64 clear_bit(wil_status_fwconnected, &wil->status); 74 sta->tid_rx[i] = NULL;
65 cfg80211_disconnected(ndev, 75 wil_tid_ampdu_rx_free(wil, r);
66 WLAN_STATUS_UNSPECIFIED_FAILURE, 76 }
67 NULL, 0, GFP_KERNEL); 77 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
68 } else if (test_bit(wil_status_fwconnecting, &wil->status)) { 78 if (wil->vring2cid_tid[i][0] == cid)
69 cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, 79 wil_vring_fini_tx(wil, i);
70 WLAN_STATUS_UNSPECIFIED_FAILURE,
71 GFP_KERNEL);
72 } 80 }
73 clear_bit(wil_status_fwconnecting, &wil->status); 81 memset(&sta->stats, 0, sizeof(sta->stats));
74 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) 82}
75 wil_vring_fini_tx(wil, i);
76 83
77 clear_bit(wil_status_dontscan, &wil->status); 84static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
85{
86 int cid = -ENOENT;
87 struct net_device *ndev = wil_to_ndev(wil);
88 struct wireless_dev *wdev = wil->wdev;
89
90 might_sleep();
91 if (bssid) {
92 cid = wil_find_cid(wil, bssid);
93 wil_dbg_misc(wil, "%s(%pM, CID %d)\n", __func__, bssid, cid);
94 } else {
95 wil_dbg_misc(wil, "%s(all)\n", __func__);
96 }
97
98 if (cid >= 0) /* disconnect 1 peer */
99 wil_disconnect_cid(wil, cid);
100 else /* disconnect all */
101 for (cid = 0; cid < WIL6210_MAX_CID; cid++)
102 wil_disconnect_cid(wil, cid);
103
104 /* link state */
105 switch (wdev->iftype) {
106 case NL80211_IFTYPE_STATION:
107 case NL80211_IFTYPE_P2P_CLIENT:
108 wil_link_off(wil);
109 if (test_bit(wil_status_fwconnected, &wil->status)) {
110 clear_bit(wil_status_fwconnected, &wil->status);
111 cfg80211_disconnected(ndev,
112 WLAN_STATUS_UNSPECIFIED_FAILURE,
113 NULL, 0, GFP_KERNEL);
114 } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
115 cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
116 WLAN_STATUS_UNSPECIFIED_FAILURE,
117 GFP_KERNEL);
118 }
119 clear_bit(wil_status_fwconnecting, &wil->status);
120 break;
121 default:
122 /* AP-like interface and monitor:
123 * never scan, always connected
124 */
125 if (bssid)
126 cfg80211_del_sta(ndev, bssid, GFP_KERNEL);
127 break;
128 }
78} 129}
79 130
80static void wil_disconnect_worker(struct work_struct *work) 131static void wil_disconnect_worker(struct work_struct *work)
@@ -82,7 +133,9 @@ static void wil_disconnect_worker(struct work_struct *work)
82 struct wil6210_priv *wil = container_of(work, 133 struct wil6210_priv *wil = container_of(work,
83 struct wil6210_priv, disconnect_worker); 134 struct wil6210_priv, disconnect_worker);
84 135
136 mutex_lock(&wil->mutex);
85 _wil6210_disconnect(wil, NULL); 137 _wil6210_disconnect(wil, NULL);
138 mutex_unlock(&wil->mutex);
86} 139}
87 140
88static void wil_connect_timer_fn(ulong x) 141static void wil_connect_timer_fn(ulong x)
@@ -97,12 +150,55 @@ static void wil_connect_timer_fn(ulong x)
97 schedule_work(&wil->disconnect_worker); 150 schedule_work(&wil->disconnect_worker);
98} 151}
99 152
153static void wil_fw_error_worker(struct work_struct *work)
154{
155 struct wil6210_priv *wil = container_of(work,
156 struct wil6210_priv, fw_error_worker);
157 struct wireless_dev *wdev = wil->wdev;
158
159 wil_dbg_misc(wil, "fw error worker\n");
160
161 if (no_fw_recovery)
162 return;
163
164 mutex_lock(&wil->mutex);
165 switch (wdev->iftype) {
166 case NL80211_IFTYPE_STATION:
167 case NL80211_IFTYPE_P2P_CLIENT:
168 case NL80211_IFTYPE_MONITOR:
169 wil_info(wil, "fw error recovery started...\n");
170 wil_reset(wil);
171
172 /* need to re-allocate Rx ring after reset */
173 wil_rx_init(wil);
174 break;
175 case NL80211_IFTYPE_AP:
176 case NL80211_IFTYPE_P2P_GO:
177 /* recovery in these modes is done by upper layers */
178 break;
179 default:
180 break;
181 }
182 mutex_unlock(&wil->mutex);
183}
184
185static int wil_find_free_vring(struct wil6210_priv *wil)
186{
187 int i;
188 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
189 if (!wil->vring_tx[i].va)
190 return i;
191 }
192 return -EINVAL;
193}
194
100static void wil_connect_worker(struct work_struct *work) 195static void wil_connect_worker(struct work_struct *work)
101{ 196{
102 int rc; 197 int rc;
103 struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 198 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
104 connect_worker); 199 connect_worker);
105 int cid = wil->pending_connect_cid; 200 int cid = wil->pending_connect_cid;
201 int ringid = wil_find_free_vring(wil);
106 202
107 if (cid < 0) { 203 if (cid < 0) {
108 wil_err(wil, "No connection pending\n"); 204 wil_err(wil, "No connection pending\n");
@@ -111,16 +207,22 @@ static void wil_connect_worker(struct work_struct *work)
111 207
112 wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid); 208 wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
113 209
114 rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0); 210 rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0);
115 wil->pending_connect_cid = -1; 211 wil->pending_connect_cid = -1;
116 if (rc == 0) 212 if (rc == 0) {
213 wil->sta[cid].status = wil_sta_connected;
117 wil_link_on(wil); 214 wil_link_on(wil);
215 } else {
216 wil->sta[cid].status = wil_sta_unused;
217 }
118} 218}
119 219
120int wil_priv_init(struct wil6210_priv *wil) 220int wil_priv_init(struct wil6210_priv *wil)
121{ 221{
122 wil_dbg_misc(wil, "%s()\n", __func__); 222 wil_dbg_misc(wil, "%s()\n", __func__);
123 223
224 memset(wil->sta, 0, sizeof(wil->sta));
225
124 mutex_init(&wil->mutex); 226 mutex_init(&wil->mutex);
125 mutex_init(&wil->wmi_mutex); 227 mutex_init(&wil->wmi_mutex);
126 228
@@ -132,6 +234,7 @@ int wil_priv_init(struct wil6210_priv *wil)
132 INIT_WORK(&wil->connect_worker, wil_connect_worker); 234 INIT_WORK(&wil->connect_worker, wil_connect_worker);
133 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 235 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
134 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); 236 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
237 INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
135 238
136 INIT_LIST_HEAD(&wil->pending_wmi_ev); 239 INIT_LIST_HEAD(&wil->pending_wmi_ev);
137 spin_lock_init(&wil->wmi_ev_lock); 240 spin_lock_init(&wil->wmi_ev_lock);
@@ -158,7 +261,10 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
158void wil_priv_deinit(struct wil6210_priv *wil) 261void wil_priv_deinit(struct wil6210_priv *wil)
159{ 262{
160 cancel_work_sync(&wil->disconnect_worker); 263 cancel_work_sync(&wil->disconnect_worker);
264 cancel_work_sync(&wil->fw_error_worker);
265 mutex_lock(&wil->mutex);
161 wil6210_disconnect(wil, NULL); 266 wil6210_disconnect(wil, NULL);
267 mutex_unlock(&wil->mutex);
162 wmi_event_flush(wil); 268 wmi_event_flush(wil);
163 destroy_workqueue(wil->wmi_wq_conn); 269 destroy_workqueue(wil->wmi_wq_conn);
164 destroy_workqueue(wil->wmi_wq); 270 destroy_workqueue(wil->wmi_wq);
@@ -166,40 +272,78 @@ void wil_priv_deinit(struct wil6210_priv *wil)
166 272
167static void wil_target_reset(struct wil6210_priv *wil) 273static void wil_target_reset(struct wil6210_priv *wil)
168{ 274{
275 int delay = 0;
276 u32 hw_state;
277 u32 rev_id;
278
169 wil_dbg_misc(wil, "Resetting...\n"); 279 wil_dbg_misc(wil, "Resetting...\n");
170 280
281 /* register read */
282#define R(a) ioread32(wil->csr + HOSTADDR(a))
171 /* register write */ 283 /* register write */
172#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a)) 284#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
173 /* register set = read, OR, write */ 285 /* register set = read, OR, write */
174#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \ 286#define S(a, v) W(a, R(a) | v)
175 wil->csr + HOSTADDR(a)) 287 /* register clear = read, AND with inverted, write */
288#define C(a, v) W(a, R(a) & ~v)
176 289
290 wil->hw_version = R(RGF_USER_FW_REV_ID);
291 rev_id = wil->hw_version & 0xff;
177 /* hpal_perst_from_pad_src_n_mask */ 292 /* hpal_perst_from_pad_src_n_mask */
178 S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6)); 293 S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
179 /* car_perst_rst_src_n_mask */ 294 /* car_perst_rst_src_n_mask */
180 S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7)); 295 S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
296 wmb(); /* order is important here */
181 297
182 W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ 298 W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
183 W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */ 299 W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
300 wmb(); /* order is important here */
184 301
185 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); 302 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
186 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); 303 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
187 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170); 304 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
188 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00); 305 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
306 wmb(); /* order is important here */
189 307
190 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); 308 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
191 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); 309 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
192 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); 310 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
193 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); 311 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
312 wmb(); /* order is important here */
194 313
195 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001); 314 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
196 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080); 315 if (rev_id == 1) {
316 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
317 } else {
318 W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
319 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
320 }
197 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); 321 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
322 wmb(); /* order is important here */
323
324 /* wait until device ready */
325 do {
326 msleep(1);
327 hw_state = R(RGF_USER_HW_MACHINE_STATE);
328 if (delay++ > 100) {
329 wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
330 hw_state);
331 return;
332 }
333 } while (hw_state != HW_MACHINE_BOOT_DONE);
334
335 if (rev_id == 2)
336 W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
337
338 C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
339 wmb(); /* order is important here */
198 340
199 wil_dbg_misc(wil, "Reset completed\n"); 341 wil_dbg_misc(wil, "Reset completed in %d ms\n", delay);
200 342
343#undef R
201#undef W 344#undef W
202#undef S 345#undef S
346#undef C
203} 347}
204 348
205void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) 349void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -234,11 +378,24 @@ int wil_reset(struct wil6210_priv *wil)
234{ 378{
235 int rc; 379 int rc;
236 380
381 WARN_ON(!mutex_is_locked(&wil->mutex));
382
237 cancel_work_sync(&wil->disconnect_worker); 383 cancel_work_sync(&wil->disconnect_worker);
238 wil6210_disconnect(wil, NULL); 384 wil6210_disconnect(wil, NULL);
239 385
386 wil->status = 0; /* prevent NAPI from being scheduled */
387 if (test_bit(wil_status_napi_en, &wil->status)) {
388 napi_synchronize(&wil->napi_rx);
389 }
390
391 if (wil->scan_request) {
392 wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
393 wil->scan_request);
394 cfg80211_scan_done(wil->scan_request, true);
395 wil->scan_request = NULL;
396 }
397
240 wil6210_disable_irq(wil); 398 wil6210_disable_irq(wil);
241 wil->status = 0;
242 399
243 wmi_event_flush(wil); 400 wmi_event_flush(wil);
244 401
@@ -248,6 +405,8 @@ int wil_reset(struct wil6210_priv *wil)
248 /* TODO: put MAC in reset */ 405 /* TODO: put MAC in reset */
249 wil_target_reset(wil); 406 wil_target_reset(wil);
250 407
408 wil_rx_fini(wil);
409
251 /* init after reset */ 410 /* init after reset */
252 wil->pending_connect_cid = -1; 411 wil->pending_connect_cid = -1;
253 reinit_completion(&wil->wmi_ready); 412 reinit_completion(&wil->wmi_ready);
@@ -261,6 +420,11 @@ int wil_reset(struct wil6210_priv *wil)
261 return rc; 420 return rc;
262} 421}
263 422
423void wil_fw_error_recovery(struct wil6210_priv *wil)
424{
425 wil_dbg_misc(wil, "starting fw error recovery\n");
426 schedule_work(&wil->fw_error_worker);
427}
264 428
265void wil_link_on(struct wil6210_priv *wil) 429void wil_link_on(struct wil6210_priv *wil)
266{ 430{
@@ -288,6 +452,8 @@ static int __wil_up(struct wil6210_priv *wil)
288 struct wireless_dev *wdev = wil->wdev; 452 struct wireless_dev *wdev = wil->wdev;
289 int rc; 453 int rc;
290 454
455 WARN_ON(!mutex_is_locked(&wil->mutex));
456
291 rc = wil_reset(wil); 457 rc = wil_reset(wil);
292 if (rc) 458 if (rc)
293 return rc; 459 return rc;
@@ -329,6 +495,7 @@ static int __wil_up(struct wil6210_priv *wil)
329 495
330 napi_enable(&wil->napi_rx); 496 napi_enable(&wil->napi_rx);
331 napi_enable(&wil->napi_tx); 497 napi_enable(&wil->napi_tx);
498 set_bit(wil_status_napi_en, &wil->status);
332 499
333 return 0; 500 return 0;
334} 501}
@@ -346,6 +513,9 @@ int wil_up(struct wil6210_priv *wil)
346 513
347static int __wil_down(struct wil6210_priv *wil) 514static int __wil_down(struct wil6210_priv *wil)
348{ 515{
516 WARN_ON(!mutex_is_locked(&wil->mutex));
517
518 clear_bit(wil_status_napi_en, &wil->status);
349 napi_disable(&wil->napi_rx); 519 napi_disable(&wil->napi_rx);
350 napi_disable(&wil->napi_tx); 520 napi_disable(&wil->napi_tx);
351 521
@@ -370,3 +540,19 @@ int wil_down(struct wil6210_priv *wil)
370 540
371 return rc; 541 return rc;
372} 542}
543
544int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
545{
546 int i;
547 int rc = -ENOENT;
548
549 for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
550 if ((wil->sta[i].status != wil_sta_unused) &&
551 ether_addr_equal(wil->sta[i].addr, mac)) {
552 rc = i;
553 break;
554 }
555 }
556
557 return rc;
558}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 717178f09aa8..fdcaeb820e75 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,8 +127,9 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
127 127
128 ndev->netdev_ops = &wil_netdev_ops; 128 ndev->netdev_ops = &wil_netdev_ops;
129 ndev->ieee80211_ptr = wdev; 129 ndev->ieee80211_ptr = wdev;
130 ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 130 ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
131 ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 131 NETIF_F_SG | NETIF_F_GRO;
132 ndev->features |= ndev->hw_features;
132 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 133 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
133 wdev->netdev = ndev; 134 wdev->netdev = ndev;
134 135
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eeceab39cda2..f1e1bb338d68 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -41,39 +41,41 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
41 switch (use_msi) { 41 switch (use_msi) {
42 case 3: 42 case 3:
43 case 1: 43 case 1:
44 wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
45 break;
44 case 0: 46 case 0:
47 wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
45 break; 48 break;
46 default: 49 default:
47 wil_err(wil, "Invalid use_msi=%d, default to 1\n", 50 wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
48 use_msi);
49 use_msi = 1; 51 use_msi = 1;
50 } 52 }
51 wil->n_msi = use_msi; 53
52 if (wil->n_msi) { 54 if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
53 wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi); 55 wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
54 rc = pci_enable_msi_block(pdev, wil->n_msi); 56 use_msi = 1;
55 if (rc && (wil->n_msi == 3)) { 57 }
56 wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); 58
57 wil->n_msi = 1; 59 if (use_msi == 1 && pci_enable_msi(pdev)) {
58 rc = pci_enable_msi_block(pdev, wil->n_msi); 60 wil_err(wil, "pci_enable_msi failed, use INTx\n");
59 } 61 use_msi = 0;
60 if (rc) {
61 wil_err(wil, "pci_enable_msi failed, use INTx\n");
62 wil->n_msi = 0;
63 }
64 } else {
65 wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
66 } 62 }
67 63
64 wil->n_msi = use_msi;
65
68 rc = wil6210_init_irq(wil, pdev->irq); 66 rc = wil6210_init_irq(wil, pdev->irq);
69 if (rc) 67 if (rc)
70 goto stop_master; 68 goto stop_master;
71 69
72 /* need reset here to obtain MAC */ 70 /* need reset here to obtain MAC */
71 mutex_lock(&wil->mutex);
73 rc = wil_reset(wil); 72 rc = wil_reset(wil);
73 mutex_unlock(&wil->mutex);
74 if (rc) 74 if (rc)
75 goto release_irq; 75 goto release_irq;
76 76
77 wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
78
77 return 0; 79 return 0;
78 80
79 release_irq: 81 release_irq:
@@ -151,6 +153,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
151 pci_set_drvdata(pdev, wil); 153 pci_set_drvdata(pdev, wil);
152 wil->pdev = pdev; 154 wil->pdev = pdev;
153 155
156 wil6210_clear_irq(wil);
154 /* FW should raise IRQ when ready */ 157 /* FW should raise IRQ when ready */
155 rc = wil_if_pcie_enable(wil); 158 rc = wil_if_pcie_enable(wil);
156 if (rc) { 159 if (rc) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
new file mode 100644
index 000000000000..d04629fe053f
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -0,0 +1,177 @@
1#include "wil6210.h"
2#include "txrx.h"
3
4#define SEQ_MODULO 0x1000
5#define SEQ_MASK 0xfff
6
7static inline int seq_less(u16 sq1, u16 sq2)
8{
9 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
10}
11
12static inline u16 seq_inc(u16 sq)
13{
14 return (sq + 1) & SEQ_MASK;
15}
16
17static inline u16 seq_sub(u16 sq1, u16 sq2)
18{
19 return (sq1 - sq2) & SEQ_MASK;
20}
21
22static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
23{
24 return seq_sub(seq, r->ssn) % r->buf_size;
25}
26
27static void wil_release_reorder_frame(struct wil6210_priv *wil,
28 struct wil_tid_ampdu_rx *r,
29 int index)
30{
31 struct net_device *ndev = wil_to_ndev(wil);
32 struct sk_buff *skb = r->reorder_buf[index];
33
34 if (!skb)
35 goto no_frame;
36
37 /* release the frame from the reorder ring buffer */
38 r->stored_mpdu_num--;
39 r->reorder_buf[index] = NULL;
40 wil_netif_rx_any(skb, ndev);
41
42no_frame:
43 r->head_seq_num = seq_inc(r->head_seq_num);
44}
45
46static void wil_release_reorder_frames(struct wil6210_priv *wil,
47 struct wil_tid_ampdu_rx *r,
48 u16 hseq)
49{
50 int index;
51
52 while (seq_less(r->head_seq_num, hseq)) {
53 index = reorder_index(r, r->head_seq_num);
54 wil_release_reorder_frame(wil, r, index);
55 }
56}
57
58static void wil_reorder_release(struct wil6210_priv *wil,
59 struct wil_tid_ampdu_rx *r)
60{
61 int index = reorder_index(r, r->head_seq_num);
62
63 while (r->reorder_buf[index]) {
64 wil_release_reorder_frame(wil, r, index);
65 index = reorder_index(r, r->head_seq_num);
66 }
67}
68
69void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
70{
71 struct net_device *ndev = wil_to_ndev(wil);
72 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
73 int tid = wil_rxdesc_tid(d);
74 int cid = wil_rxdesc_cid(d);
75 int mid = wil_rxdesc_mid(d);
76 u16 seq = wil_rxdesc_seq(d);
77 struct wil_sta_info *sta = &wil->sta[cid];
78 struct wil_tid_ampdu_rx *r = sta->tid_rx[tid];
79 u16 hseq;
80 int index;
81
82 wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
83 mid, cid, tid, seq);
84
85 if (!r) {
86 wil_netif_rx_any(skb, ndev);
87 return;
88 }
89
90 hseq = r->head_seq_num;
91
92 spin_lock(&r->reorder_lock);
93
94 /* frame with out of date sequence number */
95 if (seq_less(seq, r->head_seq_num)) {
96 dev_kfree_skb(skb);
97 goto out;
98 }
99
100 /*
101 * If frame the sequence number exceeds our buffering window
102 * size release some previous frames to make room for this one.
103 */
104 if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
105 hseq = seq_inc(seq_sub(seq, r->buf_size));
106 /* release stored frames up to new head to stack */
107 wil_release_reorder_frames(wil, r, hseq);
108 }
109
110 /* Now the new frame is always in the range of the reordering buffer */
111
112 index = reorder_index(r, seq);
113
114 /* check if we already stored this frame */
115 if (r->reorder_buf[index]) {
116 dev_kfree_skb(skb);
117 goto out;
118 }
119
120 /*
121 * If the current MPDU is in the right order and nothing else
122 * is stored we can process it directly, no need to buffer it.
123 * If it is first but there's something stored, we may be able
124 * to release frames after this one.
125 */
126 if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
127 r->head_seq_num = seq_inc(r->head_seq_num);
128 wil_netif_rx_any(skb, ndev);
129 goto out;
130 }
131
132 /* put the frame in the reordering buffer */
133 r->reorder_buf[index] = skb;
134 r->reorder_time[index] = jiffies;
135 r->stored_mpdu_num++;
136 wil_reorder_release(wil, r);
137
138out:
139 spin_unlock(&r->reorder_lock);
140}
141
142struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
143 int size, u16 ssn)
144{
145 struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
146 if (!r)
147 return NULL;
148
149 r->reorder_buf =
150 kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
151 r->reorder_time =
152 kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
153 if (!r->reorder_buf || !r->reorder_time) {
154 kfree(r->reorder_buf);
155 kfree(r->reorder_time);
156 kfree(r);
157 return NULL;
158 }
159
160 spin_lock_init(&r->reorder_lock);
161 r->ssn = ssn;
162 r->head_seq_num = ssn;
163 r->buf_size = size;
164 r->stored_mpdu_num = 0;
165 return r;
166}
167
168void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
169 struct wil_tid_ampdu_rx *r)
170{
171 if (!r)
172 return;
173 wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
174 kfree(r->reorder_buf);
175 kfree(r->reorder_time);
176 kfree(r);
177}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 0b0975d88b43..c8c547457eb4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,23 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
104 return 0; 104 return 0;
105} 105}
106 106
107static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
108 struct wil_ctx *ctx)
109{
110 dma_addr_t pa = wil_desc_addr(&d->dma.addr);
111 u16 dmalen = le16_to_cpu(d->dma.length);
112 switch (ctx->mapped_as) {
113 case wil_mapped_as_single:
114 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
115 break;
116 case wil_mapped_as_page:
117 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
118 break;
119 default:
120 break;
121 }
122}
123
107static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, 124static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
108 int tx) 125 int tx)
109{ 126{
@@ -122,15 +139,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
122 139
123 ctx = &vring->ctx[vring->swtail]; 140 ctx = &vring->ctx[vring->swtail];
124 *d = *_d; 141 *d = *_d;
125 pa = wil_desc_addr(&d->dma.addr); 142 wil_txdesc_unmap(dev, d, ctx);
126 dmalen = le16_to_cpu(d->dma.length);
127 if (vring->ctx[vring->swtail].mapped_as_page) {
128 dma_unmap_page(dev, pa, dmalen,
129 DMA_TO_DEVICE);
130 } else {
131 dma_unmap_single(dev, pa, dmalen,
132 DMA_TO_DEVICE);
133 }
134 if (ctx->skb) 143 if (ctx->skb)
135 dev_kfree_skb_any(ctx->skb); 144 dev_kfree_skb_any(ctx->skb);
136 vring->swtail = wil_vring_next_tail(vring); 145 vring->swtail = wil_vring_next_tail(vring);
@@ -344,6 +353,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
344 u16 dmalen; 353 u16 dmalen;
345 u8 ftype; 354 u8 ftype;
346 u8 ds_bits; 355 u8 ds_bits;
356 int cid;
357 struct wil_net_stats *stats;
358
347 359
348 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); 360 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
349 361
@@ -383,8 +395,10 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
383 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, 395 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
384 skb->data, skb_headlen(skb), false); 396 skb->data, skb_headlen(skb), false);
385 397
386 398 cid = wil_rxdesc_cid(d);
387 wil->stats.last_mcs_rx = wil_rxdesc_mcs(d); 399 stats = &wil->sta[cid].stats;
400 stats->last_mcs_rx = wil_rxdesc_mcs(d);
401 wil->stats.last_mcs_rx = stats->last_mcs_rx;
388 402
389 /* use radiotap header only if required */ 403 /* use radiotap header only if required */
390 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) 404 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
@@ -472,21 +486,28 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
472 * Pass Rx packet to the netif. Update statistics. 486 * Pass Rx packet to the netif. Update statistics.
473 * Called in softirq context (NAPI poll). 487 * Called in softirq context (NAPI poll).
474 */ 488 */
475static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) 489void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
476{ 490{
477 int rc; 491 gro_result_t rc;
492 struct wil6210_priv *wil = ndev_to_wil(ndev);
478 unsigned int len = skb->len; 493 unsigned int len = skb->len;
494 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
495 int cid = wil_rxdesc_cid(d);
496 struct wil_net_stats *stats = &wil->sta[cid].stats;
479 497
480 skb_orphan(skb); 498 skb_orphan(skb);
481 499
482 rc = netif_receive_skb(skb); 500 rc = napi_gro_receive(&wil->napi_rx, skb);
483 501
484 if (likely(rc == NET_RX_SUCCESS)) { 502 if (unlikely(rc == GRO_DROP)) {
503 ndev->stats.rx_dropped++;
504 stats->rx_dropped++;
505 wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
506 } else {
485 ndev->stats.rx_packets++; 507 ndev->stats.rx_packets++;
508 stats->rx_packets++;
486 ndev->stats.rx_bytes += len; 509 ndev->stats.rx_bytes += len;
487 510 stats->rx_bytes += len;
488 } else {
489 ndev->stats.rx_dropped++;
490 } 511 }
491} 512}
492 513
@@ -515,12 +536,18 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
515 skb->ip_summed = CHECKSUM_UNNECESSARY; 536 skb->ip_summed = CHECKSUM_UNNECESSARY;
516 skb->pkt_type = PACKET_OTHERHOST; 537 skb->pkt_type = PACKET_OTHERHOST;
517 skb->protocol = htons(ETH_P_802_2); 538 skb->protocol = htons(ETH_P_802_2);
518 539 wil_netif_rx_any(skb, ndev);
519 } else { 540 } else {
541 struct ethhdr *eth = (void *)skb->data;
542
520 skb->protocol = eth_type_trans(skb, ndev); 543 skb->protocol = eth_type_trans(skb, ndev);
544
545 if (is_unicast_ether_addr(eth->h_dest))
546 wil_rx_reorder(wil, skb);
547 else
548 wil_netif_rx_any(skb, ndev);
521 } 549 }
522 550
523 wil_netif_rx_any(skb, ndev);
524 } 551 }
525 wil_rx_refill(wil, v->size); 552 wil_rx_refill(wil, v->size);
526} 553}
@@ -530,6 +557,11 @@ int wil_rx_init(struct wil6210_priv *wil)
530 struct vring *vring = &wil->vring_rx; 557 struct vring *vring = &wil->vring_rx;
531 int rc; 558 int rc;
532 559
560 if (vring->va) {
561 wil_err(wil, "Rx ring already allocated\n");
562 return -EINVAL;
563 }
564
533 vring->size = WIL6210_RX_RING_SIZE; 565 vring->size = WIL6210_RX_RING_SIZE;
534 rc = wil_vring_alloc(wil, vring); 566 rc = wil_vring_alloc(wil, vring);
535 if (rc) 567 if (rc)
@@ -570,7 +602,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
570 .ring_size = cpu_to_le16(size), 602 .ring_size = cpu_to_le16(size),
571 }, 603 },
572 .ringid = id, 604 .ringid = id,
573 .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4), 605 .cidxtid = mk_cidxtid(cid, tid),
574 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, 606 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
575 .mac_ctrl = 0, 607 .mac_ctrl = 0,
576 .to_resolution = 0, 608 .to_resolution = 0,
@@ -586,6 +618,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
586 struct wmi_vring_cfg_done_event cmd; 618 struct wmi_vring_cfg_done_event cmd;
587 } __packed reply; 619 } __packed reply;
588 struct vring *vring = &wil->vring_tx[id]; 620 struct vring *vring = &wil->vring_tx[id];
621 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
589 622
590 if (vring->va) { 623 if (vring->va) {
591 wil_err(wil, "Tx ring [%d] already allocated\n", id); 624 wil_err(wil, "Tx ring [%d] already allocated\n", id);
@@ -593,11 +626,15 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
593 goto out; 626 goto out;
594 } 627 }
595 628
629 memset(txdata, 0, sizeof(*txdata));
596 vring->size = size; 630 vring->size = size;
597 rc = wil_vring_alloc(wil, vring); 631 rc = wil_vring_alloc(wil, vring);
598 if (rc) 632 if (rc)
599 goto out; 633 goto out;
600 634
635 wil->vring2cid_tid[id][0] = cid;
636 wil->vring2cid_tid[id][1] = tid;
637
601 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 638 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
602 639
603 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd), 640 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
@@ -613,6 +650,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
613 } 650 }
614 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); 651 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
615 652
653 txdata->enabled = 1;
654
616 return 0; 655 return 0;
617 out_free: 656 out_free:
618 wil_vring_free(wil, vring, 1); 657 wil_vring_free(wil, vring, 1);
@@ -625,23 +664,116 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
625{ 664{
626 struct vring *vring = &wil->vring_tx[id]; 665 struct vring *vring = &wil->vring_tx[id];
627 666
667 WARN_ON(!mutex_is_locked(&wil->mutex));
668
628 if (!vring->va) 669 if (!vring->va)
629 return; 670 return;
630 671
672 /* make sure NAPI won't touch this vring */
673 wil->vring_tx_data[id].enabled = 0;
674 if (test_bit(wil_status_napi_en, &wil->status))
675 napi_synchronize(&wil->napi_tx);
676
631 wil_vring_free(wil, vring, 1); 677 wil_vring_free(wil, vring, 1);
632} 678}
633 679
634static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, 680static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
635 struct sk_buff *skb) 681 struct sk_buff *skb)
636{ 682{
637 struct vring *v = &wil->vring_tx[0]; 683 int i;
684 struct ethhdr *eth = (void *)skb->data;
685 int cid = wil_find_cid(wil, eth->h_dest);
686
687 if (cid < 0)
688 return NULL;
638 689
639 if (v->va) 690 if (!wil->sta[cid].data_port_open &&
640 return v; 691 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
692 return NULL;
693
694 /* TODO: fix for multiple TID */
695 for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
696 if (wil->vring2cid_tid[i][0] == cid) {
697 struct vring *v = &wil->vring_tx[i];
698 wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
699 __func__, eth->h_dest, i);
700 if (v->va) {
701 return v;
702 } else {
703 wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
704 return NULL;
705 }
706 }
707 }
641 708
642 return NULL; 709 return NULL;
643} 710}
644 711
712static void wil_set_da_for_vring(struct wil6210_priv *wil,
713 struct sk_buff *skb, int vring_index)
714{
715 struct ethhdr *eth = (void *)skb->data;
716 int cid = wil->vring2cid_tid[vring_index][0];
717 memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
718}
719
720static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
721 struct sk_buff *skb);
722/*
723 * Find 1-st vring and return it; set dest address for this vring in skb
724 * duplicate skb and send it to other active vrings
725 */
726static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
727 struct sk_buff *skb)
728{
729 struct vring *v, *v2;
730 struct sk_buff *skb2;
731 int i;
732 u8 cid;
733
734 /* find 1-st vring eligible for data */
735 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
736 v = &wil->vring_tx[i];
737 if (!v->va)
738 continue;
739
740 cid = wil->vring2cid_tid[i][0];
741 if (!wil->sta[cid].data_port_open)
742 continue;
743
744 goto found;
745 }
746
747 wil_err(wil, "Tx while no vrings active?\n");
748
749 return NULL;
750
751found:
752 wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
753 wil_set_da_for_vring(wil, skb, i);
754
755 /* find other active vrings and duplicate skb for each */
756 for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
757 v2 = &wil->vring_tx[i];
758 if (!v2->va)
759 continue;
760 cid = wil->vring2cid_tid[i][0];
761 if (!wil->sta[cid].data_port_open)
762 continue;
763
764 skb2 = skb_copy(skb, GFP_ATOMIC);
765 if (skb2) {
766 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
767 wil_set_da_for_vring(wil, skb2, i);
768 wil_tx_vring(wil, v2, skb2);
769 } else {
770 wil_err(wil, "skb_copy failed\n");
771 }
772 }
773
774 return v;
775}
776
645static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, 777static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
646 int vring_index) 778 int vring_index)
647{ 779{
@@ -667,6 +799,13 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
667 return 0; 799 return 0;
668} 800}
669 801
802static inline
803void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
804{
805 d->mac.d[2] |= ((nr_frags + 1) <<
806 MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
807}
808
670static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, 809static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
671 struct vring_tx_desc *d, 810 struct vring_tx_desc *d,
672 struct sk_buff *skb) 811 struct sk_buff *skb)
@@ -731,8 +870,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
731 870
732 wil_dbg_txrx(wil, "%s()\n", __func__); 871 wil_dbg_txrx(wil, "%s()\n", __func__);
733 872
734 if (avail < vring->size/8)
735 netif_tx_stop_all_queues(wil_to_ndev(wil));
736 if (avail < 1 + nr_frags) { 873 if (avail < 1 + nr_frags) {
737 wil_err(wil, "Tx ring full. No space for %d fragments\n", 874 wil_err(wil, "Tx ring full. No space for %d fragments\n",
738 1 + nr_frags); 875 1 + nr_frags);
@@ -740,9 +877,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
740 } 877 }
741 _d = &(vring->va[i].tx); 878 _d = &(vring->va[i].tx);
742 879
743 /* FIXME FW can accept only unicast frames for the peer */
744 memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
745
746 pa = dma_map_single(dev, skb->data, 880 pa = dma_map_single(dev, skb->data,
747 skb_headlen(skb), DMA_TO_DEVICE); 881 skb_headlen(skb), DMA_TO_DEVICE);
748 882
@@ -753,6 +887,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
753 887
754 if (unlikely(dma_mapping_error(dev, pa))) 888 if (unlikely(dma_mapping_error(dev, pa)))
755 return -EINVAL; 889 return -EINVAL;
890 vring->ctx[i].mapped_as = wil_mapped_as_single;
756 /* 1-st segment */ 891 /* 1-st segment */
757 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); 892 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
758 /* Process TCP/UDP checksum offloading */ 893 /* Process TCP/UDP checksum offloading */
@@ -762,8 +897,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
762 goto dma_error; 897 goto dma_error;
763 } 898 }
764 899
765 d->mac.d[2] |= ((nr_frags + 1) << 900 vring->ctx[i].nr_frags = nr_frags;
766 MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); 901 wil_tx_desc_set_nr_frags(d, nr_frags);
767 if (nr_frags) 902 if (nr_frags)
768 *_d = *d; 903 *_d = *d;
769 904
@@ -778,8 +913,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
778 DMA_TO_DEVICE); 913 DMA_TO_DEVICE);
779 if (unlikely(dma_mapping_error(dev, pa))) 914 if (unlikely(dma_mapping_error(dev, pa)))
780 goto dma_error; 915 goto dma_error;
916 vring->ctx[i].mapped_as = wil_mapped_as_page;
781 wil_tx_desc_map(d, pa, len, vring_index); 917 wil_tx_desc_map(d, pa, len, vring_index);
782 vring->ctx[i].mapped_as_page = 1; 918 /* no need to check return code -
919 * if it succeeded for 1-st descriptor,
920 * it will succeed here too
921 */
922 wil_tx_desc_offload_cksum_set(wil, d, skb);
783 *_d = *d; 923 *_d = *d;
784 } 924 }
785 /* for the last seg only */ 925 /* for the last seg only */
@@ -808,7 +948,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
808 /* unmap what we have mapped */ 948 /* unmap what we have mapped */
809 nr_frags = f + 1; /* frags mapped + one for skb head */ 949 nr_frags = f + 1; /* frags mapped + one for skb head */
810 for (f = 0; f < nr_frags; f++) { 950 for (f = 0; f < nr_frags; f++) {
811 u16 dmalen;
812 struct wil_ctx *ctx; 951 struct wil_ctx *ctx;
813 952
814 i = (swhead + f) % vring->size; 953 i = (swhead + f) % vring->size;
@@ -816,12 +955,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
816 _d = &(vring->va[i].tx); 955 _d = &(vring->va[i].tx);
817 *d = *_d; 956 *d = *_d;
818 _d->dma.status = TX_DMA_STATUS_DU; 957 _d->dma.status = TX_DMA_STATUS_DU;
819 pa = wil_desc_addr(&d->dma.addr); 958 wil_txdesc_unmap(dev, d, ctx);
820 dmalen = le16_to_cpu(d->dma.length);
821 if (ctx->mapped_as_page)
822 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
823 else
824 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
825 959
826 if (ctx->skb) 960 if (ctx->skb)
827 dev_kfree_skb_any(ctx->skb); 961 dev_kfree_skb_any(ctx->skb);
@@ -836,12 +970,17 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
836netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) 970netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
837{ 971{
838 struct wil6210_priv *wil = ndev_to_wil(ndev); 972 struct wil6210_priv *wil = ndev_to_wil(ndev);
973 struct ethhdr *eth = (void *)skb->data;
839 struct vring *vring; 974 struct vring *vring;
975 static bool pr_once_fw;
840 int rc; 976 int rc;
841 977
842 wil_dbg_txrx(wil, "%s()\n", __func__); 978 wil_dbg_txrx(wil, "%s()\n", __func__);
843 if (!test_bit(wil_status_fwready, &wil->status)) { 979 if (!test_bit(wil_status_fwready, &wil->status)) {
844 wil_err(wil, "FW not ready\n"); 980 if (!pr_once_fw) {
981 wil_err(wil, "FW not ready\n");
982 pr_once_fw = true;
983 }
845 goto drop; 984 goto drop;
846 } 985 }
847 if (!test_bit(wil_status_fwconnected, &wil->status)) { 986 if (!test_bit(wil_status_fwconnected, &wil->status)) {
@@ -852,16 +991,25 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
852 wil_err(wil, "Xmit in monitor mode not supported\n"); 991 wil_err(wil, "Xmit in monitor mode not supported\n");
853 goto drop; 992 goto drop;
854 } 993 }
994 pr_once_fw = false;
855 995
856 /* find vring */ 996 /* find vring */
857 vring = wil_find_tx_vring(wil, skb); 997 if (is_unicast_ether_addr(eth->h_dest)) {
998 vring = wil_find_tx_vring(wil, skb);
999 } else {
1000 vring = wil_tx_bcast(wil, skb);
1001 }
858 if (!vring) { 1002 if (!vring) {
859 wil_err(wil, "No Tx VRING available\n"); 1003 wil_err(wil, "No Tx VRING found for %pM\n", eth->h_dest);
860 goto drop; 1004 goto drop;
861 } 1005 }
862 /* set up vring entry */ 1006 /* set up vring entry */
863 rc = wil_tx_vring(wil, vring, skb); 1007 rc = wil_tx_vring(wil, vring, skb);
864 1008
1009 /* do we still have enough room in the vring? */
1010 if (wil_vring_avail_tx(vring) < vring->size/8)
1011 netif_tx_stop_all_queues(wil_to_ndev(wil));
1012
865 switch (rc) { 1013 switch (rc) {
866 case 0: 1014 case 0:
867 /* statistics will be updated on the tx_complete */ 1015 /* statistics will be updated on the tx_complete */
@@ -891,64 +1039,82 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
891 struct net_device *ndev = wil_to_ndev(wil); 1039 struct net_device *ndev = wil_to_ndev(wil);
892 struct device *dev = wil_to_dev(wil); 1040 struct device *dev = wil_to_dev(wil);
893 struct vring *vring = &wil->vring_tx[ringid]; 1041 struct vring *vring = &wil->vring_tx[ringid];
1042 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
894 int done = 0; 1043 int done = 0;
1044 int cid = wil->vring2cid_tid[ringid][0];
1045 struct wil_net_stats *stats = &wil->sta[cid].stats;
1046 volatile struct vring_tx_desc *_d;
895 1047
896 if (!vring->va) { 1048 if (!vring->va) {
897 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); 1049 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
898 return 0; 1050 return 0;
899 } 1051 }
900 1052
1053 if (!txdata->enabled) {
1054 wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
1055 return 0;
1056 }
1057
901 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); 1058 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
902 1059
903 while (!wil_vring_is_empty(vring)) { 1060 while (!wil_vring_is_empty(vring)) {
904 volatile struct vring_tx_desc *_d = 1061 int new_swtail;
905 &vring->va[vring->swtail].tx;
906 struct vring_tx_desc dd, *d = &dd;
907 dma_addr_t pa;
908 u16 dmalen;
909 struct wil_ctx *ctx = &vring->ctx[vring->swtail]; 1062 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
910 struct sk_buff *skb = ctx->skb; 1063 /**
911 1064 * For the fragmented skb, HW will set DU bit only for the
912 *d = *_d; 1065 * last fragment. look for it
1066 */
1067 int lf = (vring->swtail + ctx->nr_frags) % vring->size;
1068 /* TODO: check we are not past head */
913 1069
914 if (!(d->dma.status & TX_DMA_STATUS_DU)) 1070 _d = &vring->va[lf].tx;
1071 if (!(_d->dma.status & TX_DMA_STATUS_DU))
915 break; 1072 break;
916 1073
917 dmalen = le16_to_cpu(d->dma.length); 1074 new_swtail = (lf + 1) % vring->size;
918 trace_wil6210_tx_done(ringid, vring->swtail, dmalen, 1075 while (vring->swtail != new_swtail) {
919 d->dma.error); 1076 struct vring_tx_desc dd, *d = &dd;
920 wil_dbg_txrx(wil, 1077 u16 dmalen;
921 "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n", 1078 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
922 vring->swtail, dmalen, d->dma.status, 1079 struct sk_buff *skb = ctx->skb;
923 d->dma.error); 1080 _d = &vring->va[vring->swtail].tx;
924 wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4, 1081
925 (const void *)d, sizeof(*d), false); 1082 *d = *_d;
926 1083
927 pa = wil_desc_addr(&d->dma.addr); 1084 dmalen = le16_to_cpu(d->dma.length);
928 if (ctx->mapped_as_page) 1085 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
929 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); 1086 d->dma.error);
930 else 1087 wil_dbg_txrx(wil,
931 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); 1088 "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
932 1089 vring->swtail, dmalen, d->dma.status,
933 if (skb) { 1090 d->dma.error);
934 if (d->dma.error == 0) { 1091 wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
935 ndev->stats.tx_packets++; 1092 (const void *)d, sizeof(*d), false);
936 ndev->stats.tx_bytes += skb->len; 1093
937 } else { 1094 wil_txdesc_unmap(dev, d, ctx);
938 ndev->stats.tx_errors++; 1095
939 } 1096 if (skb) {
1097 if (d->dma.error == 0) {
1098 ndev->stats.tx_packets++;
1099 stats->tx_packets++;
1100 ndev->stats.tx_bytes += skb->len;
1101 stats->tx_bytes += skb->len;
1102 } else {
1103 ndev->stats.tx_errors++;
1104 stats->tx_errors++;
1105 }
940 1106
941 dev_kfree_skb_any(skb); 1107 dev_kfree_skb_any(skb);
1108 }
1109 memset(ctx, 0, sizeof(*ctx));
1110 /* There is no need to touch HW descriptor:
1111 * - ststus bit TX_DMA_STATUS_DU is set by design,
1112 * so hardware will not try to process this desc.,
1113 * - rest of descriptor will be initialized on Tx.
1114 */
1115 vring->swtail = wil_vring_next_tail(vring);
1116 done++;
942 } 1117 }
943 memset(ctx, 0, sizeof(*ctx));
944 /*
945 * There is no need to touch HW descriptor:
946 * - ststus bit TX_DMA_STATUS_DU is set by design,
947 * so hardware will not try to process this desc.,
948 * - rest of descriptor will be initialized on Tx.
949 */
950 vring->swtail = wil_vring_next_tail(vring);
951 done++;
952 } 1118 }
953 if (wil_vring_avail_tx(vring) > vring->size/4) 1119 if (wil_vring_avail_tx(vring) > vring->size/4)
954 netif_tx_wake_all_queues(wil_to_ndev(wil)); 1120 netif_tx_wake_all_queues(wil_to_ndev(wil));
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index b3828279204c..bc5706a4f007 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -436,4 +436,11 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
436 return (void *)skb->cb; 436 return (void *)skb->cb;
437} 437}
438 438
439void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
440void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
441struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
442 int size, u16 ssn);
443void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
444 struct wil_tid_ampdu_rx *r);
445
439#endif /* WIL6210_TXRX_H */ 446#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1f91eaf95bbe..2a2dec75f026 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -74,23 +74,21 @@ struct RGF_ICR {
74} __packed; 74} __packed;
75 75
76/* registers - FW addresses */ 76/* registers - FW addresses */
77#define RGF_USER_USER_SCRATCH_PAD (0x8802bc) 77#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
78#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */ 78 #define HW_MACHINE_BOOT_DONE (0x3fffffd)
79 #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
80#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
81#define RGF_USER_MAC_CPU_0 (0x8801fc)
82#define RGF_USER_USER_CPU_0 (0x8801e0) 79#define RGF_USER_USER_CPU_0 (0x8801e0)
80#define RGF_USER_MAC_CPU_0 (0x8801fc)
81#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
82#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
83#define RGF_USER_CLKS_CTL_0 (0x880abc)
84 #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */
83#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04) 85#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
84#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08) 86#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
85#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c) 87#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
86#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10) 88#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
87 89#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
88#define RGF_DMA_PSEUDO_CAUSE (0x881c68) 90#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
89#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c) 91 #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
90#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
91 #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
92 #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
93 #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
94 92
95#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */ 93#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
96 #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0) 94 #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@@ -105,13 +103,22 @@ struct RGF_ICR {
105/* Interrupt moderation control */ 103/* Interrupt moderation control */
106#define RGF_DMA_ITR_CNT_TRSH (0x881c5c) 104#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
107#define RGF_DMA_ITR_CNT_DATA (0x881c60) 105#define RGF_DMA_ITR_CNT_DATA (0x881c60)
108#define RGF_DMA_ITR_CNT_CRL (0x881C64) 106#define RGF_DMA_ITR_CNT_CRL (0x881c64)
109 #define BIT_DMA_ITR_CNT_CRL_EN BIT(0) 107 #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
110 #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1) 108 #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
111 #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2) 109 #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
112 #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3) 110 #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
113 #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4) 111 #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
114 112
113#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
114#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
115#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
116 #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
117 #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
118 #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
119
120#define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4)
121
115/* popular locations */ 122/* popular locations */
116#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) 123#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
117#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \ 124#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@@ -125,6 +132,31 @@ struct RGF_ICR {
125 132
126/* Hardware definitions end */ 133/* Hardware definitions end */
127 134
135/**
136 * mk_cidxtid - construct @cidxtid field
137 * @cid: CID value
138 * @tid: TID value
139 *
140 * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
141 */
142static inline u8 mk_cidxtid(u8 cid, u8 tid)
143{
144 return ((tid & 0xf) << 4) | (cid & 0xf);
145}
146
147/**
148 * parse_cidxtid - parse @cidxtid field
149 * @cid: store CID value here
150 * @tid: store TID value here
151 *
152 * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
153 */
154static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
155{
156 *cid = cidxtid & 0xf;
157 *tid = (cidxtid >> 4) & 0xf;
158}
159
128struct wil6210_mbox_ring { 160struct wil6210_mbox_ring {
129 u32 base; 161 u32 base;
130 u16 entry_size; /* max. size of mbox entry, incl. all headers */ 162 u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -184,12 +216,19 @@ struct pending_wmi_event {
184 } __packed event; 216 } __packed event;
185}; 217};
186 218
219enum { /* for wil_ctx.mapped_as */
220 wil_mapped_as_none = 0,
221 wil_mapped_as_single = 1,
222 wil_mapped_as_page = 2,
223};
224
187/** 225/**
188 * struct wil_ctx - software context for Vring descriptor 226 * struct wil_ctx - software context for Vring descriptor
189 */ 227 */
190struct wil_ctx { 228struct wil_ctx {
191 struct sk_buff *skb; 229 struct sk_buff *skb;
192 u8 mapped_as_page:1; 230 u8 nr_frags;
231 u8 mapped_as;
193}; 232};
194 233
195union vring_desc; 234union vring_desc;
@@ -204,6 +243,14 @@ struct vring {
204 struct wil_ctx *ctx; /* ctx[size] - software context */ 243 struct wil_ctx *ctx; /* ctx[size] - software context */
205}; 244};
206 245
246/**
247 * Additional data for Tx Vring
248 */
249struct vring_tx_data {
250 int enabled;
251
252};
253
207enum { /* for wil6210_priv.status */ 254enum { /* for wil6210_priv.status */
208 wil_status_fwready = 0, 255 wil_status_fwready = 0,
209 wil_status_fwconnecting, 256 wil_status_fwconnecting,
@@ -211,10 +258,51 @@ enum { /* for wil6210_priv.status */
211 wil_status_dontscan, 258 wil_status_dontscan,
212 wil_status_reset_done, 259 wil_status_reset_done,
213 wil_status_irqen, /* FIXME: interrupts enabled - for debug */ 260 wil_status_irqen, /* FIXME: interrupts enabled - for debug */
261 wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
214}; 262};
215 263
216struct pci_dev; 264struct pci_dev;
217 265
266/**
267 * struct tid_ampdu_rx - TID aggregation information (Rx).
268 *
269 * @reorder_buf: buffer to reorder incoming aggregated MPDUs
270 * @reorder_time: jiffies when skb was added
271 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
272 * @reorder_timer: releases expired frames from the reorder buffer.
273 * @last_rx: jiffies of last rx activity
274 * @head_seq_num: head sequence number in reordering buffer.
275 * @stored_mpdu_num: number of MPDUs in reordering buffer
276 * @ssn: Starting Sequence Number expected to be aggregated.
277 * @buf_size: buffer size for incoming A-MPDUs
278 * @timeout: reset timer value (in TUs).
279 * @dialog_token: dialog token for aggregation session
280 * @rcu_head: RCU head used for freeing this struct
281 * @reorder_lock: serializes access to reorder buffer, see below.
282 *
283 * This structure's lifetime is managed by RCU, assignments to
284 * the array holding it must hold the aggregation mutex.
285 *
286 * The @reorder_lock is used to protect the members of this
287 * struct, except for @timeout, @buf_size and @dialog_token,
288 * which are constant across the lifetime of the struct (the
289 * dialog token being used only for debugging).
290 */
291struct wil_tid_ampdu_rx {
292 spinlock_t reorder_lock; /* see above */
293 struct sk_buff **reorder_buf;
294 unsigned long *reorder_time;
295 struct timer_list session_timer;
296 struct timer_list reorder_timer;
297 unsigned long last_rx;
298 u16 head_seq_num;
299 u16 stored_mpdu_num;
300 u16 ssn;
301 u16 buf_size;
302 u16 timeout;
303 u8 dialog_token;
304};
305
218struct wil6210_stats { 306struct wil6210_stats {
219 u64 tsf; 307 u64 tsf;
220 u32 snr; 308 u32 snr;
@@ -226,6 +314,43 @@ struct wil6210_stats {
226 u16 peer_tx_sector; 314 u16 peer_tx_sector;
227}; 315};
228 316
317enum wil_sta_status {
318 wil_sta_unused = 0,
319 wil_sta_conn_pending = 1,
320 wil_sta_connected = 2,
321};
322
323#define WIL_STA_TID_NUM (16)
324
325struct wil_net_stats {
326 unsigned long rx_packets;
327 unsigned long tx_packets;
328 unsigned long rx_bytes;
329 unsigned long tx_bytes;
330 unsigned long tx_errors;
331 unsigned long rx_dropped;
332 u16 last_mcs_rx;
333};
334
335/**
336 * struct wil_sta_info - data for peer
337 *
338 * Peer identified by its CID (connection ID)
339 * NIC performs beam forming for each peer;
340 * if no beam forming done, frame exchange is not
341 * possible.
342 */
343struct wil_sta_info {
344 u8 addr[ETH_ALEN];
345 enum wil_sta_status status;
346 struct wil_net_stats stats;
347 bool data_port_open; /* can send any data, not only EAPOL */
348 /* Rx BACK */
349 struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
350 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
351 unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
352};
353
229struct wil6210_priv { 354struct wil6210_priv {
230 struct pci_dev *pdev; 355 struct pci_dev *pdev;
231 int n_msi; 356 int n_msi;
@@ -233,6 +358,7 @@ struct wil6210_priv {
233 void __iomem *csr; 358 void __iomem *csr;
234 ulong status; 359 ulong status;
235 u32 fw_version; 360 u32 fw_version;
361 u32 hw_version;
236 u8 n_mids; /* number of additional MIDs as reported by FW */ 362 u8 n_mids; /* number of additional MIDs as reported by FW */
237 /* profile */ 363 /* profile */
238 u32 monitor_flags; 364 u32 monitor_flags;
@@ -253,6 +379,7 @@ struct wil6210_priv {
253 struct workqueue_struct *wmi_wq_conn; /* for connect worker */ 379 struct workqueue_struct *wmi_wq_conn; /* for connect worker */
254 struct work_struct connect_worker; 380 struct work_struct connect_worker;
255 struct work_struct disconnect_worker; 381 struct work_struct disconnect_worker;
382 struct work_struct fw_error_worker; /* for FW error recovery */
256 struct timer_list connect_timer; 383 struct timer_list connect_timer;
257 int pending_connect_cid; 384 int pending_connect_cid;
258 struct list_head pending_wmi_ev; 385 struct list_head pending_wmi_ev;
@@ -267,7 +394,9 @@ struct wil6210_priv {
267 /* DMA related */ 394 /* DMA related */
268 struct vring vring_rx; 395 struct vring vring_rx;
269 struct vring vring_tx[WIL6210_MAX_TX_RINGS]; 396 struct vring vring_tx[WIL6210_MAX_TX_RINGS];
270 u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN]; 397 struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
398 u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
399 struct wil_sta_info sta[WIL6210_MAX_CID];
271 /* scan */ 400 /* scan */
272 struct cfg80211_scan_request *scan_request; 401 struct cfg80211_scan_request *scan_request;
273 402
@@ -329,11 +458,13 @@ void wil_if_remove(struct wil6210_priv *wil);
329int wil_priv_init(struct wil6210_priv *wil); 458int wil_priv_init(struct wil6210_priv *wil);
330void wil_priv_deinit(struct wil6210_priv *wil); 459void wil_priv_deinit(struct wil6210_priv *wil);
331int wil_reset(struct wil6210_priv *wil); 460int wil_reset(struct wil6210_priv *wil);
461void wil_fw_error_recovery(struct wil6210_priv *wil);
332void wil_link_on(struct wil6210_priv *wil); 462void wil_link_on(struct wil6210_priv *wil);
333void wil_link_off(struct wil6210_priv *wil); 463void wil_link_off(struct wil6210_priv *wil);
334int wil_up(struct wil6210_priv *wil); 464int wil_up(struct wil6210_priv *wil);
335int wil_down(struct wil6210_priv *wil); 465int wil_down(struct wil6210_priv *wil);
336void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); 466void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
467int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
337 468
338void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr); 469void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
339void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); 470void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
@@ -357,8 +488,11 @@ int wmi_echo(struct wil6210_priv *wil);
357int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); 488int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
358int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); 489int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
359int wmi_p2p_cfg(struct wil6210_priv *wil, int channel); 490int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
491int wmi_rxon(struct wil6210_priv *wil, bool on);
360int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); 492int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
493int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
361 494
495void wil6210_clear_irq(struct wil6210_priv *wil);
362int wil6210_init_irq(struct wil6210_priv *wil, int irq); 496int wil6210_init_irq(struct wil6210_priv *wil, int irq);
363void wil6210_fini_irq(struct wil6210_priv *wil, int irq); 497void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
364void wil6210_disable_irq(struct wil6210_priv *wil); 498void wil6210_disable_irq(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 063963ee422a..2ba56eef0c45 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -307,14 +307,14 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
307 u32 freq = ieee80211_channel_to_frequency(ch_no, 307 u32 freq = ieee80211_channel_to_frequency(ch_no,
308 IEEE80211_BAND_60GHZ); 308 IEEE80211_BAND_60GHZ);
309 struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); 309 struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
310 /* TODO convert LE to CPU */ 310 s32 signal = data->info.sqi;
311 s32 signal = 0; /* TODO */
312 __le16 fc = rx_mgmt_frame->frame_control; 311 __le16 fc = rx_mgmt_frame->frame_control;
313 u32 d_len = le32_to_cpu(data->info.len); 312 u32 d_len = le32_to_cpu(data->info.len);
314 u16 d_status = le16_to_cpu(data->info.status); 313 u16 d_status = le16_to_cpu(data->info.status);
315 314
316 wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", 315 wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
317 data->info.channel, data->info.mcs, data->info.snr); 316 data->info.channel, data->info.mcs, data->info.snr,
317 data->info.sqi);
318 wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, 318 wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
319 le16_to_cpu(fc)); 319 le16_to_cpu(fc));
320 wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", 320 wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
@@ -384,6 +384,11 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
384 evt->assoc_req_len, evt->assoc_resp_len); 384 evt->assoc_req_len, evt->assoc_resp_len);
385 return; 385 return;
386 } 386 }
387 if (evt->cid >= WIL6210_MAX_CID) {
388 wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
389 return;
390 }
391
387 ch = evt->channel + 1; 392 ch = evt->channel + 1;
388 wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", 393 wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
389 evt->bssid, ch, evt->cid); 394 evt->bssid, ch, evt->cid);
@@ -439,7 +444,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
439 444
440 /* FIXME FW can transmit only ucast frames to peer */ 445 /* FIXME FW can transmit only ucast frames to peer */
441 /* FIXME real ring_id instead of hard coded 0 */ 446 /* FIXME real ring_id instead of hard coded 0 */
442 memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN); 447 memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
448 wil->sta[evt->cid].status = wil_sta_conn_pending;
443 449
444 wil->pending_connect_cid = evt->cid; 450 wil->pending_connect_cid = evt->cid;
445 queue_work(wil->wmi_wq_conn, &wil->connect_worker); 451 queue_work(wil->wmi_wq_conn, &wil->connect_worker);
@@ -456,7 +462,9 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
456 462
457 wil->sinfo_gen++; 463 wil->sinfo_gen++;
458 464
465 mutex_lock(&wil->mutex);
459 wil6210_disconnect(wil, evt->bssid); 466 wil6210_disconnect(wil, evt->bssid);
467 mutex_unlock(&wil->mutex);
460} 468}
461 469
462static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) 470static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@@ -476,11 +484,11 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
476 wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector); 484 wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
477 wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector); 485 wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
478 wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n" 486 wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
479 "BF status 0x%08x SNR 0x%08x\n" 487 "BF status 0x%08x SNR 0x%08x SQI %d%%\n"
480 "Tx Tpt %d goodput %d Rx goodput %d\n" 488 "Tx Tpt %d goodput %d Rx goodput %d\n"
481 "Sectors(rx:tx) my %d:%d peer %d:%d\n", 489 "Sectors(rx:tx) my %d:%d peer %d:%d\n",
482 wil->stats.bf_mcs, wil->stats.tsf, evt->status, 490 wil->stats.bf_mcs, wil->stats.tsf, evt->status,
483 wil->stats.snr, le32_to_cpu(evt->tx_tpt), 491 wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt),
484 le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput), 492 le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
485 wil->stats.my_rx_sector, wil->stats.my_tx_sector, 493 wil->stats.my_rx_sector, wil->stats.my_tx_sector,
486 wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); 494 wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
@@ -499,10 +507,16 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
499 int sz = eapol_len + ETH_HLEN; 507 int sz = eapol_len + ETH_HLEN;
500 struct sk_buff *skb; 508 struct sk_buff *skb;
501 struct ethhdr *eth; 509 struct ethhdr *eth;
510 int cid;
511 struct wil_net_stats *stats = NULL;
502 512
503 wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len, 513 wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
504 evt->src_mac); 514 evt->src_mac);
505 515
516 cid = wil_find_cid(wil, evt->src_mac);
517 if (cid >= 0)
518 stats = &wil->sta[cid].stats;
519
506 if (eapol_len > 196) { /* TODO: revisit size limit */ 520 if (eapol_len > 196) { /* TODO: revisit size limit */
507 wil_err(wil, "EAPOL too large\n"); 521 wil_err(wil, "EAPOL too large\n");
508 return; 522 return;
@@ -513,6 +527,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
513 wil_err(wil, "Failed to allocate skb\n"); 527 wil_err(wil, "Failed to allocate skb\n");
514 return; 528 return;
515 } 529 }
530
516 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN); 531 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
517 memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN); 532 memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
518 memcpy(eth->h_source, evt->src_mac, ETH_ALEN); 533 memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
@@ -521,9 +536,15 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
521 skb->protocol = eth_type_trans(skb, ndev); 536 skb->protocol = eth_type_trans(skb, ndev);
522 if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) { 537 if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
523 ndev->stats.rx_packets++; 538 ndev->stats.rx_packets++;
524 ndev->stats.rx_bytes += skb->len; 539 ndev->stats.rx_bytes += sz;
540 if (stats) {
541 stats->rx_packets++;
542 stats->rx_bytes += sz;
543 }
525 } else { 544 } else {
526 ndev->stats.rx_dropped++; 545 ndev->stats.rx_dropped++;
546 if (stats)
547 stats->rx_dropped++;
527 } 548 }
528} 549}
529 550
@@ -531,9 +552,16 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
531{ 552{
532 struct net_device *ndev = wil_to_ndev(wil); 553 struct net_device *ndev = wil_to_ndev(wil);
533 struct wmi_data_port_open_event *evt = d; 554 struct wmi_data_port_open_event *evt = d;
555 u8 cid = evt->cid;
534 556
535 wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid); 557 wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
536 558
559 if (cid >= ARRAY_SIZE(wil->sta)) {
560 wil_err(wil, "Link UP for invalid CID %d\n", cid);
561 return;
562 }
563
564 wil->sta[cid].data_port_open = true;
537 netif_carrier_on(ndev); 565 netif_carrier_on(ndev);
538} 566}
539 567
@@ -541,10 +569,17 @@ static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
541{ 569{
542 struct net_device *ndev = wil_to_ndev(wil); 570 struct net_device *ndev = wil_to_ndev(wil);
543 struct wmi_wbe_link_down_event *evt = d; 571 struct wmi_wbe_link_down_event *evt = d;
572 u8 cid = evt->cid;
544 573
545 wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n", 574 wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
546 evt->cid, le32_to_cpu(evt->reason)); 575 cid, le32_to_cpu(evt->reason));
576
577 if (cid >= ARRAY_SIZE(wil->sta)) {
578 wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
579 return;
580 }
547 581
582 wil->sta[cid].data_port_open = false;
548 netif_carrier_off(ndev); 583 netif_carrier_off(ndev);
549} 584}
550 585
@@ -552,10 +587,42 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
552 int len) 587 int len)
553{ 588{
554 struct wmi_vring_ba_status_event *evt = d; 589 struct wmi_vring_ba_status_event *evt = d;
590 struct wil_sta_info *sta;
591 uint i, cid;
592
593 /* TODO: use Rx BA status, not Tx one */
555 594
556 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n", 595 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
557 evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize, 596 evt->ringid,
558 __le16_to_cpu(evt->ba_timeout)); 597 evt->status == WMI_BA_AGREED ? "OK" : "N/A",
598 evt->agg_wsize, __le16_to_cpu(evt->ba_timeout));
599
600 if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
601 wil_err(wil, "invalid ring id %d\n", evt->ringid);
602 return;
603 }
604
605 cid = wil->vring2cid_tid[evt->ringid][0];
606 if (cid >= WIL6210_MAX_CID) {
607 wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
608 return;
609 }
610
611 sta = &wil->sta[cid];
612 if (sta->status == wil_sta_unused) {
613 wil_err(wil, "CID %d unused\n", cid);
614 return;
615 }
616
617 wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr);
618 for (i = 0; i < WIL_STA_TID_NUM; i++) {
619 struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
620 sta->tid_rx[i] = NULL;
621 wil_tid_ampdu_rx_free(wil, r);
622 if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize)
623 sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil,
624 evt->agg_wsize, 0);
625 }
559} 626}
560 627
561static const struct { 628static const struct {
@@ -893,6 +960,38 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
893 return rc; 960 return rc;
894} 961}
895 962
963/**
964 * wmi_rxon - turn radio on/off
965 * @on: turn on if true, off otherwise
966 *
967 * Only switch radio. Channel should be set separately.
968 * No timeout for rxon - radio turned on forever unless some other call
969 * turns it off
970 */
971int wmi_rxon(struct wil6210_priv *wil, bool on)
972{
973 int rc;
974 struct {
975 struct wil6210_mbox_hdr_wmi wmi;
976 struct wmi_listen_started_event evt;
977 } __packed reply;
978
979 wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
980
981 if (on) {
982 rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
983 WMI_LISTEN_STARTED_EVENTID,
984 &reply, sizeof(reply), 100);
985 if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
986 rc = -EINVAL;
987 } else {
988 rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
989 WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
990 }
991
992 return rc;
993}
994
896int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) 995int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
897{ 996{
898 struct wireless_dev *wdev = wil->wdev; 997 struct wireless_dev *wdev = wil->wdev;
@@ -906,6 +1005,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
906 }, 1005 },
907 .mid = 0, /* TODO - what is it? */ 1006 .mid = 0, /* TODO - what is it? */
908 .decap_trans_type = WMI_DECAP_TYPE_802_3, 1007 .decap_trans_type = WMI_DECAP_TYPE_802_3,
1008 .reorder_type = WMI_RX_SW_REORDER,
909 }; 1009 };
910 struct { 1010 struct {
911 struct wil6210_mbox_hdr_wmi wmi; 1011 struct wil6210_mbox_hdr_wmi wmi;
@@ -973,6 +1073,18 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
973 return 0; 1073 return 0;
974} 1074}
975 1075
1076int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
1077{
1078 struct wmi_disconnect_sta_cmd cmd = {
1079 .disconnect_reason = cpu_to_le16(reason),
1080 };
1081 memcpy(cmd.dst_mac, mac, ETH_ALEN);
1082
1083 wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
1084
1085 return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
1086}
1087
976void wmi_event_flush(struct wil6210_priv *wil) 1088void wmi_event_flush(struct wil6210_priv *wil)
977{ 1089{
978 struct pending_wmi_event *evt, *t; 1090 struct pending_wmi_event *evt, *t;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bf93ea859f2d..1fe41af81a59 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -67,7 +67,7 @@
67#include <linux/moduleparam.h> 67#include <linux/moduleparam.h>
68#include <linux/firmware.h> 68#include <linux/firmware.h>
69#include <linux/jiffies.h> 69#include <linux/jiffies.h>
70#include <linux/ieee80211.h> 70#include <net/cfg80211.h>
71#include "atmel.h" 71#include "atmel.h"
72 72
73#define DRIVER_MAJOR 0 73#define DRIVER_MAJOR 0
@@ -2273,7 +2273,7 @@ static int atmel_set_freq(struct net_device *dev,
2273 2273
2274 /* Hack to fall through... */ 2274 /* Hack to fall through... */
2275 fwrq->e = 0; 2275 fwrq->e = 0;
2276 fwrq->m = ieee80211_freq_to_dsss_chan(f); 2276 fwrq->m = ieee80211_frequency_to_channel(f);
2277 } 2277 }
2278 /* Setting by channel number */ 2278 /* Setting by channel number */
2279 if ((fwrq->m > 1000) || (fwrq->e > 0)) 2279 if ((fwrq->m > 1000) || (fwrq->e > 0))
@@ -2434,8 +2434,8 @@ static int atmel_get_range(struct net_device *dev,
2434 range->freq[k].i = i; /* List index */ 2434 range->freq[k].i = i; /* List index */
2435 2435
2436 /* Values in MHz -> * 10^5 * 10 */ 2436 /* Values in MHz -> * 10^5 * 10 */
2437 range->freq[k].m = (ieee80211_dsss_chan_to_freq(i) * 2437 range->freq[k].m = 100000 *
2438 100000); 2438 ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
2439 range->freq[k++].e = 1; 2439 range->freq[k++].e = 1;
2440 } 2440 }
2441 range->num_frequency = k; 2441 range->num_frequency = k;
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 51ff0b198d0a..088d544ec63f 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -92,7 +92,7 @@ config B43_SDIO
92# if we can do DMA. 92# if we can do DMA.
93config B43_BCMA_PIO 93config B43_BCMA_PIO
94 bool 94 bool
95 depends on B43_BCMA 95 depends on B43 && B43_BCMA
96 select BCMA_BLOCKIO 96 select BCMA_BLOCKIO
97 default y 97 default y
98 98
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 822aad8842f4..50517b801cb4 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -86,7 +86,7 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
86 86
87static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) 87static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
88{ 88{
89 return 0; 89 return false;
90} 90}
91 91
92static inline void b43_debugfs_init(void) 92static inline void b43_debugfs_init(void)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c75237eb55a1..69fc3d65531a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1549,7 +1549,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1549 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); 1549 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
1550 1550
1551 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); 1551 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
1552 len = min((size_t) dev->wl->current_beacon->len, 1552 len = min_t(size_t, dev->wl->current_beacon->len,
1553 0x200 - sizeof(struct b43_plcp_hdr6)); 1553 0x200 - sizeof(struct b43_plcp_hdr6));
1554 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; 1554 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
1555 1555
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index abac25ee958d..f476fc337d64 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -58,41 +58,6 @@ enum b43_verbosity {
58#endif 58#endif
59}; 59};
60 60
61
62/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
63static inline u8 b43_freq_to_channel_5ghz(int freq)
64{
65 return ((freq - 5000) / 5);
66}
67static inline u8 b43_freq_to_channel_2ghz(int freq)
68{
69 u8 channel;
70
71 if (freq == 2484)
72 channel = 14;
73 else
74 channel = (freq - 2407) / 5;
75
76 return channel;
77}
78
79/* Lightweight function to convert a channel number to a frequency (in Mhz). */
80static inline int b43_channel_to_freq_5ghz(u8 channel)
81{
82 return (5000 + (5 * channel));
83}
84static inline int b43_channel_to_freq_2ghz(u8 channel)
85{
86 int freq;
87
88 if (channel == 14)
89 freq = 2484;
90 else
91 freq = 2407 + (5 * channel);
92
93 return freq;
94}
95
96static inline int b43_is_cck_rate(int rate) 61static inline int b43_is_cck_rate(int rate)
97{ 62{
98 return (rate == B43_CCK_RATE_1MB || 63 return (rate == B43_CCK_RATE_1MB ||
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index f01676ac481b..dbaa51890198 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -133,9 +133,9 @@ void b43_phy_exit(struct b43_wldev *dev)
133bool b43_has_hardware_pctl(struct b43_wldev *dev) 133bool b43_has_hardware_pctl(struct b43_wldev *dev)
134{ 134{
135 if (!dev->phy.hardware_power_control) 135 if (!dev->phy.hardware_power_control)
136 return 0; 136 return false;
137 if (!dev->phy.ops->supports_hwpctl) 137 if (!dev->phy.ops->supports_hwpctl)
138 return 0; 138 return false;
139 return dev->phy.ops->supports_hwpctl(dev); 139 return dev->phy.ops->supports_hwpctl(dev);
140} 140}
141 141
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index a73ff8c9deb5..a4ff5e2a42b9 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -637,7 +637,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
637 637
638 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL); 638 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
639 if (!(ctl & B43_PIO8_RXCTL_FRAMERDY)) 639 if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
640 return 0; 640 return false;
641 b43_piorx_write32(q, B43_PIO8_RXCTL, 641 b43_piorx_write32(q, B43_PIO8_RXCTL,
642 B43_PIO8_RXCTL_FRAMERDY); 642 B43_PIO8_RXCTL_FRAMERDY);
643 for (i = 0; i < 10; i++) { 643 for (i = 0; i < 10; i++) {
@@ -651,7 +651,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
651 651
652 ctl = b43_piorx_read16(q, B43_PIO_RXCTL); 652 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
653 if (!(ctl & B43_PIO_RXCTL_FRAMERDY)) 653 if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
654 return 0; 654 return false;
655 b43_piorx_write16(q, B43_PIO_RXCTL, 655 b43_piorx_write16(q, B43_PIO_RXCTL,
656 B43_PIO_RXCTL_FRAMERDY); 656 B43_PIO_RXCTL_FRAMERDY);
657 for (i = 0; i < 10; i++) { 657 for (i = 0; i < 10; i++) {
@@ -662,7 +662,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
662 } 662 }
663 } 663 }
664 b43dbg(q->dev->wl, "PIO RX timed out\n"); 664 b43dbg(q->dev->wl, "PIO RX timed out\n");
665 return 1; 665 return true;
666data_ready: 666data_ready:
667 667
668 /* Get the preamble (RX header) */ 668 /* Get the preamble (RX header) */
@@ -759,7 +759,7 @@ data_ready:
759 759
760 b43_rx(q->dev, skb, rxhdr); 760 b43_rx(q->dev, skb, rxhdr);
761 761
762 return 1; 762 return true;
763 763
764rx_error: 764rx_error:
765 if (err_msg) 765 if (err_msg)
@@ -769,7 +769,7 @@ rx_error:
769 else 769 else
770 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY); 770 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
771 771
772 return 1; 772 return true;
773} 773}
774 774
775void b43_pio_rx(struct b43_pio_rxqueue *q) 775void b43_pio_rx(struct b43_pio_rxqueue *q)
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 8e8431d4eb0c..3190493bd07f 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -40,7 +40,7 @@ static int get_integer(const char *buf, size_t count)
40 40
41 if (count == 0) 41 if (count == 0)
42 goto out; 42 goto out;
43 count = min(count, (size_t) 10); 43 count = min_t(size_t, count, 10);
44 memcpy(tmp, buf, count); 44 memcpy(tmp, buf, count);
45 ret = simple_strtol(tmp, NULL, 10); 45 ret = simple_strtol(tmp, NULL, 10);
46 out: 46 out:
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 50e5ddb12fb3..31adb8cf0291 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -337,7 +337,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
337 /* iv16 */ 337 /* iv16 */
338 memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3); 338 memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3);
339 } else { 339 } else {
340 iv_len = min((size_t) info->control.hw_key->iv_len, 340 iv_len = min_t(size_t, info->control.hw_key->iv_len,
341 ARRAY_SIZE(txhdr->iv)); 341 ARRAY_SIZE(txhdr->iv));
342 memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); 342 memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
343 } 343 }
@@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
806 B43_WARN_ON(1); 806 B43_WARN_ON(1);
807 /* FIXME: We don't really know which value the "chanid" contains. 807 /* FIXME: We don't really know which value the "chanid" contains.
808 * So the following assignment might be wrong. */ 808 * So the following assignment might be wrong. */
809 status.freq = b43_channel_to_freq_5ghz(chanid); 809 status.freq =
810 ieee80211_channel_to_frequency(chanid, status.band);
810 break; 811 break;
811 case B43_PHYTYPE_G: 812 case B43_PHYTYPE_G:
812 status.band = IEEE80211_BAND_2GHZ; 813 status.band = IEEE80211_BAND_2GHZ;
@@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
819 case B43_PHYTYPE_HT: 820 case B43_PHYTYPE_HT:
820 /* chanid is the SHM channel cookie. Which is the plain 821 /* chanid is the SHM channel cookie. Which is the plain
821 * channel number in b43. */ 822 * channel number in b43. */
822 if (chanstat & B43_RX_CHAN_5GHZ) { 823 if (chanstat & B43_RX_CHAN_5GHZ)
823 status.band = IEEE80211_BAND_5GHZ; 824 status.band = IEEE80211_BAND_5GHZ;
824 status.freq = b43_channel_to_freq_5ghz(chanid); 825 else
825 } else {
826 status.band = IEEE80211_BAND_2GHZ; 826 status.band = IEEE80211_BAND_2GHZ;
827 status.freq = b43_channel_to_freq_2ghz(chanid); 827 status.freq =
828 } 828 ieee80211_channel_to_frequency(chanid, status.band);
829 break; 829 break;
830 default: 830 default:
831 B43_WARN_ON(1); 831 B43_WARN_ON(1);
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 349c77605231..1aec2146a2bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -978,7 +978,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
978 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); 978 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
979 979
980 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); 980 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
981 len = min((size_t)dev->wl->current_beacon->len, 981 len = min_t(size_t, dev->wl->current_beacon->len,
982 0x200 - sizeof(struct b43legacy_plcp_hdr6)); 982 0x200 - sizeof(struct b43legacy_plcp_hdr6));
983 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; 983 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
984 984
@@ -1155,7 +1155,7 @@ static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
1155 b43legacy_write_probe_resp_plcp(dev, 0x350, size, 1155 b43legacy_write_probe_resp_plcp(dev, 0x350, size,
1156 &b43legacy_b_ratetable[3]); 1156 &b43legacy_b_ratetable[3]);
1157 1157
1158 size = min((size_t)size, 1158 size = min_t(size_t, size,
1159 0x200 - sizeof(struct b43legacy_plcp_hdr6)); 1159 0x200 - sizeof(struct b43legacy_plcp_hdr6));
1160 b43legacy_write_template_common(dev, probe_resp_data, 1160 b43legacy_write_template_common(dev, probe_resp_data,
1161 size, ram_offset, 1161 size, ram_offset,
diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c
index 57f8b089767c..2a1da15c913b 100644
--- a/drivers/net/wireless/b43legacy/sysfs.c
+++ b/drivers/net/wireless/b43legacy/sysfs.c
@@ -42,7 +42,7 @@ static int get_integer(const char *buf, size_t count)
42 42
43 if (count == 0) 43 if (count == 0)
44 goto out; 44 goto out;
45 count = min(count, (size_t)10); 45 count = min_t(size_t, count, 10);
46 memcpy(tmp, buf, count); 46 memcpy(tmp, buf, count);
47 ret = simple_strtol(tmp, NULL, 10); 47 ret = simple_strtol(tmp, NULL, 10);
48out: 48out:
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 86588c9ff0f2..34bf3f0b729f 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -254,7 +254,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
254 B43legacy_TX4_MAC_KEYALG_SHIFT) & 254 B43legacy_TX4_MAC_KEYALG_SHIFT) &
255 B43legacy_TX4_MAC_KEYALG; 255 B43legacy_TX4_MAC_KEYALG;
256 wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control); 256 wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
257 iv_len = min((size_t)info->control.hw_key->iv_len, 257 iv_len = min_t(size_t, info->control.hw_key->iv_len,
258 ARRAY_SIZE(txhdr->iv)); 258 ARRAY_SIZE(txhdr->iv));
259 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); 259 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
260 } else { 260 } else {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 57cddee03252..1d2ceac3a221 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
24obj-$(CONFIG_BRCMFMAC) += brcmfmac.o 24obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
25brcmfmac-objs += \ 25brcmfmac-objs += \
26 wl_cfg80211.o \ 26 wl_cfg80211.o \
27 chip.o \
27 fwil.o \ 28 fwil.o \
28 fweh.o \ 29 fweh.o \
29 fwsignal.o \ 30 fwsignal.o \
@@ -36,8 +37,7 @@ brcmfmac-objs += \
36 btcoex.o 37 btcoex.o
37brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ 38brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
38 dhd_sdio.o \ 39 dhd_sdio.o \
39 bcmsdh.o \ 40 bcmsdh.o
40 sdio_chip.o
41brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ 41brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
42 usb.o 42 usb.o
43brcmfmac-$(CONFIG_BRCMDBG) += \ 43brcmfmac-$(CONFIG_BRCMDBG) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index fa35b23bbaa7..a16e644e7c08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -43,7 +43,6 @@
43#include "dhd_bus.h" 43#include "dhd_bus.h"
44#include "dhd_dbg.h" 44#include "dhd_dbg.h"
45#include "sdio_host.h" 45#include "sdio_host.h"
46#include "sdio_chip.h"
47 46
48#define SDIOH_API_ACCESS_RETRY_LIMIT 2 47#define SDIOH_API_ACCESS_RETRY_LIMIT 2
49 48
@@ -54,6 +53,12 @@
54/* Maximum milliseconds to wait for F2 to come up */ 53/* Maximum milliseconds to wait for F2 to come up */
55#define SDIO_WAIT_F2RDY 3000 54#define SDIO_WAIT_F2RDY 3000
56 55
56#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
57#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
58
59static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
60module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
61MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
57 62
58static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id) 63static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
59{ 64{
@@ -264,26 +269,17 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
264 break; 269 break;
265 } 270 }
266 271
267 if (ret) { 272 if (ret)
268 /* 273 brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
269 * SleepCSR register access can fail when 274 write ? "write" : "read", fn, addr, ret);
270 * waking up the device so reduce this noise 275
271 * in the logs.
272 */
273 if (addr != SBSDIO_FUNC1_SLEEPCSR)
274 brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
275 write ? "write" : "read", fn, addr, ret);
276 else
277 brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
278 write ? "write" : "read", fn, addr, ret);
279 }
280 return ret; 276 return ret;
281} 277}
282 278
283static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, 279static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
284 u8 regsz, void *data, bool write) 280 u8 regsz, void *data, bool write)
285{ 281{
286 u8 func_num; 282 u8 func;
287 s32 retry = 0; 283 s32 retry = 0;
288 int ret; 284 int ret;
289 285
@@ -297,9 +293,9 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
297 * The rest: function 1 silicon backplane core registers 293 * The rest: function 1 silicon backplane core registers
298 */ 294 */
299 if ((addr & ~REG_F0_REG_MASK) == 0) 295 if ((addr & ~REG_F0_REG_MASK) == 0)
300 func_num = SDIO_FUNC_0; 296 func = SDIO_FUNC_0;
301 else 297 else
302 func_num = SDIO_FUNC_1; 298 func = SDIO_FUNC_1;
303 299
304 do { 300 do {
305 if (!write) 301 if (!write)
@@ -307,16 +303,26 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
307 /* for retry wait for 1 ms till bus get settled down */ 303 /* for retry wait for 1 ms till bus get settled down */
308 if (retry) 304 if (retry)
309 usleep_range(1000, 2000); 305 usleep_range(1000, 2000);
310 ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz, 306 ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
311 data, write); 307 data, write);
312 } while (ret != 0 && ret != -ENOMEDIUM && 308 } while (ret != 0 && ret != -ENOMEDIUM &&
313 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); 309 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
314 310
315 if (ret == -ENOMEDIUM) 311 if (ret == -ENOMEDIUM)
316 brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM); 312 brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
317 else if (ret != 0) 313 else if (ret != 0) {
318 brcmf_err("failed with %d\n", ret); 314 /*
319 315 * SleepCSR register access can fail when
316 * waking up the device so reduce this noise
317 * in the logs.
318 */
319 if (addr != SBSDIO_FUNC1_SLEEPCSR)
320 brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
321 write ? "write" : "read", func, addr, ret);
322 else
323 brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
324 write ? "write" : "read", func, addr, ret);
325 }
320 return ret; 326 return ret;
321} 327}
322 328
@@ -488,7 +494,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
488 struct mmc_request mmc_req; 494 struct mmc_request mmc_req;
489 struct mmc_command mmc_cmd; 495 struct mmc_command mmc_cmd;
490 struct mmc_data mmc_dat; 496 struct mmc_data mmc_dat;
491 struct sg_table st;
492 struct scatterlist *sgl; 497 struct scatterlist *sgl;
493 int ret = 0; 498 int ret = 0;
494 499
@@ -533,16 +538,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
533 pkt_offset = 0; 538 pkt_offset = 0;
534 pkt_next = target_list->next; 539 pkt_next = target_list->next;
535 540
536 if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
537 ret = -ENOMEM;
538 goto exit;
539 }
540
541 memset(&mmc_req, 0, sizeof(struct mmc_request)); 541 memset(&mmc_req, 0, sizeof(struct mmc_request));
542 memset(&mmc_cmd, 0, sizeof(struct mmc_command)); 542 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
543 memset(&mmc_dat, 0, sizeof(struct mmc_data)); 543 memset(&mmc_dat, 0, sizeof(struct mmc_data));
544 544
545 mmc_dat.sg = st.sgl; 545 mmc_dat.sg = sdiodev->sgtable.sgl;
546 mmc_dat.blksz = func_blk_sz; 546 mmc_dat.blksz = func_blk_sz;
547 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 547 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
548 mmc_cmd.opcode = SD_IO_RW_EXTENDED; 548 mmc_cmd.opcode = SD_IO_RW_EXTENDED;
@@ -558,7 +558,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
558 while (seg_sz) { 558 while (seg_sz) {
559 req_sz = 0; 559 req_sz = 0;
560 sg_cnt = 0; 560 sg_cnt = 0;
561 sgl = st.sgl; 561 sgl = sdiodev->sgtable.sgl;
562 /* prep sg table */ 562 /* prep sg table */
563 while (pkt_next != (struct sk_buff *)target_list) { 563 while (pkt_next != (struct sk_buff *)target_list) {
564 pkt_data = pkt_next->data + pkt_offset; 564 pkt_data = pkt_next->data + pkt_offset;
@@ -640,7 +640,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
640 } 640 }
641 641
642exit: 642exit:
643 sg_free_table(&st); 643 sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
644 while ((pkt_next = __skb_dequeue(&local_list)) != NULL) 644 while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
645 brcmu_pkt_buf_free_skb(pkt_next); 645 brcmu_pkt_buf_free_skb(pkt_next);
646 646
@@ -827,7 +827,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
827 } 827 }
828 if (!write) 828 if (!write)
829 memcpy(data, pkt->data, dsize); 829 memcpy(data, pkt->data, dsize);
830 skb_trim(pkt, dsize); 830 skb_trim(pkt, 0);
831 831
832 /* Adjust for next transfer (if any) */ 832 /* Adjust for next transfer (if any) */
833 size -= dsize; 833 size -= dsize;
@@ -864,6 +864,29 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
864 return 0; 864 return 0;
865} 865}
866 866
867static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
868{
869 uint nents;
870 int err;
871
872 if (!sdiodev->sg_support)
873 return;
874
875 nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
876 nents += (nents >> 4) + 1;
877
878 WARN_ON(nents > sdiodev->max_segment_count);
879
880 brcmf_dbg(TRACE, "nents=%d\n", nents);
881 err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
882 if (err < 0) {
883 brcmf_err("allocation failed: disable scatter-gather");
884 sdiodev->sg_support = false;
885 }
886
887 sdiodev->txglomsz = brcmf_sdiod_txglomsz;
888}
889
867static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) 890static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
868{ 891{
869 if (sdiodev->bus) { 892 if (sdiodev->bus) {
@@ -881,6 +904,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
881 sdio_disable_func(sdiodev->func[1]); 904 sdio_disable_func(sdiodev->func[1]);
882 sdio_release_host(sdiodev->func[1]); 905 sdio_release_host(sdiodev->func[1]);
883 906
907 sg_free_table(&sdiodev->sgtable);
884 sdiodev->sbwad = 0; 908 sdiodev->sbwad = 0;
885 909
886 return 0; 910 return 0;
@@ -936,6 +960,11 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
936 SG_MAX_SINGLE_ALLOC); 960 SG_MAX_SINGLE_ALLOC);
937 sdiodev->max_segment_size = host->max_seg_size; 961 sdiodev->max_segment_size = host->max_seg_size;
938 962
963 /* allocate scatter-gather table. sg support
964 * will be disabled upon allocation failure.
965 */
966 brcmf_sdiod_sgtable_alloc(sdiodev);
967
939 /* try to attach to the target device */ 968 /* try to attach to the target device */
940 sdiodev->bus = brcmf_sdio_probe(sdiodev); 969 sdiodev->bus = brcmf_sdio_probe(sdiodev);
941 if (!sdiodev->bus) { 970 if (!sdiodev->bus) {
@@ -960,6 +989,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
960 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)}, 989 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
961 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, 990 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
962 SDIO_DEVICE_ID_BROADCOM_4335_4339)}, 991 SDIO_DEVICE_ID_BROADCOM_4335_4339)},
992 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
963 { /* end: all zeroes */ }, 993 { /* end: all zeroes */ },
964}; 994};
965MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 995MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1073,9 +1103,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
1073 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 1103 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1074 int ret = 0; 1104 int ret = 0;
1075 1105
1076 brcmf_dbg(SDIO, "\n"); 1106 brcmf_dbg(SDIO, "Enter\n");
1077
1078 atomic_set(&sdiodev->suspend, true);
1079 1107
1080 sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); 1108 sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
1081 if (!(sdio_flags & MMC_PM_KEEP_POWER)) { 1109 if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
@@ -1083,9 +1111,12 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
1083 return -EINVAL; 1111 return -EINVAL;
1084 } 1112 }
1085 1113
1114 atomic_set(&sdiodev->suspend, true);
1115
1086 ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER); 1116 ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
1087 if (ret) { 1117 if (ret) {
1088 brcmf_err("Failed to set pm_flags\n"); 1118 brcmf_err("Failed to set pm_flags\n");
1119 atomic_set(&sdiodev->suspend, false);
1089 return ret; 1120 return ret;
1090 } 1121 }
1091 1122
@@ -1099,6 +1130,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
1099 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1130 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1100 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 1131 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1101 1132
1133 brcmf_dbg(SDIO, "Enter\n");
1102 brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS); 1134 brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
1103 atomic_set(&sdiodev->suspend, false); 1135 atomic_set(&sdiodev->suspend, false);
1104 return 0; 1136 return 0;
@@ -1115,14 +1147,15 @@ static struct sdio_driver brcmf_sdmmc_driver = {
1115 .remove = brcmf_ops_sdio_remove, 1147 .remove = brcmf_ops_sdio_remove,
1116 .name = BRCMFMAC_SDIO_PDATA_NAME, 1148 .name = BRCMFMAC_SDIO_PDATA_NAME,
1117 .id_table = brcmf_sdmmc_ids, 1149 .id_table = brcmf_sdmmc_ids,
1118#ifdef CONFIG_PM_SLEEP
1119 .drv = { 1150 .drv = {
1151 .owner = THIS_MODULE,
1152#ifdef CONFIG_PM_SLEEP
1120 .pm = &brcmf_sdio_pm_ops, 1153 .pm = &brcmf_sdio_pm_ops,
1121 },
1122#endif /* CONFIG_PM_SLEEP */ 1154#endif /* CONFIG_PM_SLEEP */
1155 },
1123}; 1156};
1124 1157
1125static int brcmf_sdio_pd_probe(struct platform_device *pdev) 1158static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
1126{ 1159{
1127 brcmf_dbg(SDIO, "Enter\n"); 1160 brcmf_dbg(SDIO, "Enter\n");
1128 1161
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644
index 000000000000..df130ef53d1c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -0,0 +1,1034 @@
1/*
2 * Copyright (c) 2014 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/list.h>
19#include <linux/ssb/ssb_regs.h>
20#include <linux/bcma/bcma.h>
21#include <linux/bcma/bcma_regs.h>
22
23#include <defs.h>
24#include <soc.h>
25#include <brcm_hw_ids.h>
26#include <brcmu_utils.h>
27#include <chipcommon.h>
28#include "dhd_dbg.h"
29#include "chip.h"
30
31/* SOC Interconnect types (aka chip types) */
32#define SOCI_SB 0
33#define SOCI_AI 1
34
35/* PL-368 DMP definitions */
36#define DMP_DESC_TYPE_MSK 0x0000000F
37#define DMP_DESC_EMPTY 0x00000000
38#define DMP_DESC_VALID 0x00000001
39#define DMP_DESC_COMPONENT 0x00000001
40#define DMP_DESC_MASTER_PORT 0x00000003
41#define DMP_DESC_ADDRESS 0x00000005
42#define DMP_DESC_ADDRSIZE_GT32 0x00000008
43#define DMP_DESC_EOT 0x0000000F
44
45#define DMP_COMP_DESIGNER 0xFFF00000
46#define DMP_COMP_DESIGNER_S 20
47#define DMP_COMP_PARTNUM 0x000FFF00
48#define DMP_COMP_PARTNUM_S 8
49#define DMP_COMP_CLASS 0x000000F0
50#define DMP_COMP_CLASS_S 4
51#define DMP_COMP_REVISION 0xFF000000
52#define DMP_COMP_REVISION_S 24
53#define DMP_COMP_NUM_SWRAP 0x00F80000
54#define DMP_COMP_NUM_SWRAP_S 19
55#define DMP_COMP_NUM_MWRAP 0x0007C000
56#define DMP_COMP_NUM_MWRAP_S 14
57#define DMP_COMP_NUM_SPORT 0x00003E00
58#define DMP_COMP_NUM_SPORT_S 9
59#define DMP_COMP_NUM_MPORT 0x000001F0
60#define DMP_COMP_NUM_MPORT_S 4
61
62#define DMP_MASTER_PORT_UID 0x0000FF00
63#define DMP_MASTER_PORT_UID_S 8
64#define DMP_MASTER_PORT_NUM 0x000000F0
65#define DMP_MASTER_PORT_NUM_S 4
66
67#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
68#define DMP_SLAVE_ADDR_BASE_S 12
69#define DMP_SLAVE_PORT_NUM 0x00000F00
70#define DMP_SLAVE_PORT_NUM_S 8
71#define DMP_SLAVE_TYPE 0x000000C0
72#define DMP_SLAVE_TYPE_S 6
73#define DMP_SLAVE_TYPE_SLAVE 0
74#define DMP_SLAVE_TYPE_BRIDGE 1
75#define DMP_SLAVE_TYPE_SWRAP 2
76#define DMP_SLAVE_TYPE_MWRAP 3
77#define DMP_SLAVE_SIZE_TYPE 0x00000030
78#define DMP_SLAVE_SIZE_TYPE_S 4
79#define DMP_SLAVE_SIZE_4K 0
80#define DMP_SLAVE_SIZE_8K 1
81#define DMP_SLAVE_SIZE_16K 2
82#define DMP_SLAVE_SIZE_DESC 3
83
84/* EROM CompIdentB */
85#define CIB_REV_MASK 0xff000000
86#define CIB_REV_SHIFT 24
87
88/* ARM CR4 core specific control flag bits */
89#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
90
91/* D11 core specific control flag bits */
92#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
93#define D11_BCMA_IOCTL_PHYRESET 0x0008
94
95/* chip core base & ramsize */
96/* bcm4329 */
97/* SDIO device core, ID 0x829 */
98#define BCM4329_CORE_BUS_BASE 0x18011000
99/* internal memory core, ID 0x80e */
100#define BCM4329_CORE_SOCRAM_BASE 0x18003000
101/* ARM Cortex M3 core, ID 0x82a */
102#define BCM4329_CORE_ARM_BASE 0x18002000
103#define BCM4329_RAMSIZE 0x48000
104
105/* bcm43143 */
106/* SDIO device core */
107#define BCM43143_CORE_BUS_BASE 0x18002000
108/* internal memory core */
109#define BCM43143_CORE_SOCRAM_BASE 0x18004000
110/* ARM Cortex M3 core, ID 0x82a */
111#define BCM43143_CORE_ARM_BASE 0x18003000
112#define BCM43143_RAMSIZE 0x70000
113
114#define CORE_SB(base, field) \
115 (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
116#define SBCOREREV(sbidh) \
117 ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
118 ((sbidh) & SSB_IDHIGH_RCLO))
119
120struct sbconfig {
121 u32 PAD[2];
122 u32 sbipsflag; /* initiator port ocp slave flag */
123 u32 PAD[3];
124 u32 sbtpsflag; /* target port ocp slave flag */
125 u32 PAD[11];
126 u32 sbtmerrloga; /* (sonics >= 2.3) */
127 u32 PAD;
128 u32 sbtmerrlog; /* (sonics >= 2.3) */
129 u32 PAD[3];
130 u32 sbadmatch3; /* address match3 */
131 u32 PAD;
132 u32 sbadmatch2; /* address match2 */
133 u32 PAD;
134 u32 sbadmatch1; /* address match1 */
135 u32 PAD[7];
136 u32 sbimstate; /* initiator agent state */
137 u32 sbintvec; /* interrupt mask */
138 u32 sbtmstatelow; /* target state */
139 u32 sbtmstatehigh; /* target state */
140 u32 sbbwa0; /* bandwidth allocation table0 */
141 u32 PAD;
142 u32 sbimconfiglow; /* initiator configuration */
143 u32 sbimconfighigh; /* initiator configuration */
144 u32 sbadmatch0; /* address match0 */
145 u32 PAD;
146 u32 sbtmconfiglow; /* target configuration */
147 u32 sbtmconfighigh; /* target configuration */
148 u32 sbbconfig; /* broadcast configuration */
149 u32 PAD;
150 u32 sbbstate; /* broadcast state */
151 u32 PAD[3];
152 u32 sbactcnfg; /* activate configuration */
153 u32 PAD[3];
154 u32 sbflagst; /* current sbflags */
155 u32 PAD[3];
156 u32 sbidlow; /* identification */
157 u32 sbidhigh; /* identification */
158};
159
160struct brcmf_core_priv {
161 struct brcmf_core pub;
162 u32 wrapbase;
163 struct list_head list;
164 struct brcmf_chip_priv *chip;
165};
166
167/* ARM CR4 core specific control flag bits */
168#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
169
170/* D11 core specific control flag bits */
171#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
172#define D11_BCMA_IOCTL_PHYRESET 0x0008
173
174struct brcmf_chip_priv {
175 struct brcmf_chip pub;
176 const struct brcmf_buscore_ops *ops;
177 void *ctx;
178 /* assured first core is chipcommon, second core is buscore */
179 struct list_head cores;
180 u16 num_cores;
181
182 bool (*iscoreup)(struct brcmf_core_priv *core);
183 void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
184 u32 reset);
185 void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
186 u32 postreset);
187};
188
189static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
190 struct brcmf_core *core)
191{
192 u32 regdata;
193
194 regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
195 core->rev = SBCOREREV(regdata);
196}
197
198static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
199{
200 struct brcmf_chip_priv *ci;
201 u32 regdata;
202 u32 address;
203
204 ci = core->chip;
205 address = CORE_SB(core->pub.base, sbtmstatelow);
206 regdata = ci->ops->read32(ci->ctx, address);
207 regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
208 SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
209 return SSB_TMSLOW_CLOCK == regdata;
210}
211
212static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
213{
214 struct brcmf_chip_priv *ci;
215 u32 regdata;
216 bool ret;
217
218 ci = core->chip;
219 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
220 ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
221
222 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
223 ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
224
225 return ret;
226}
227
228static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
229 u32 prereset, u32 reset)
230{
231 struct brcmf_chip_priv *ci;
232 u32 val, base;
233
234 ci = core->chip;
235 base = core->pub.base;
236 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
237 if (val & SSB_TMSLOW_RESET)
238 return;
239
240 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
241 if ((val & SSB_TMSLOW_CLOCK) != 0) {
242 /*
243 * set target reject and spin until busy is clear
244 * (preserve core-specific bits)
245 */
246 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
247 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
248 val | SSB_TMSLOW_REJECT);
249
250 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
251 udelay(1);
252 SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
253 & SSB_TMSHIGH_BUSY), 100000);
254
255 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
256 if (val & SSB_TMSHIGH_BUSY)
257 brcmf_err("core state still busy\n");
258
259 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
260 if (val & SSB_IDLOW_INITIATOR) {
261 val = ci->ops->read32(ci->ctx,
262 CORE_SB(base, sbimstate));
263 val |= SSB_IMSTATE_REJECT;
264 ci->ops->write32(ci->ctx,
265 CORE_SB(base, sbimstate), val);
266 val = ci->ops->read32(ci->ctx,
267 CORE_SB(base, sbimstate));
268 udelay(1);
269 SPINWAIT((ci->ops->read32(ci->ctx,
270 CORE_SB(base, sbimstate)) &
271 SSB_IMSTATE_BUSY), 100000);
272 }
273
274 /* set reset and reject while enabling the clocks */
275 val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
276 SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
277 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
278 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
279 udelay(10);
280
281 /* clear the initiator reject bit */
282 val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
283 if (val & SSB_IDLOW_INITIATOR) {
284 val = ci->ops->read32(ci->ctx,
285 CORE_SB(base, sbimstate));
286 val &= ~SSB_IMSTATE_REJECT;
287 ci->ops->write32(ci->ctx,
288 CORE_SB(base, sbimstate), val);
289 }
290 }
291
292 /* leave reset and reject asserted */
293 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
294 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
295 udelay(1);
296}
297
298static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
299 u32 prereset, u32 reset)
300{
301 struct brcmf_chip_priv *ci;
302 u32 regdata;
303
304 ci = core->chip;
305
306 /* if core is already in reset, just return */
307 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
308 if ((regdata & BCMA_RESET_CTL_RESET) != 0)
309 return;
310
311 /* configure reset */
312 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
313 prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
314 ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
315
316 /* put in reset */
317 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
318 BCMA_RESET_CTL_RESET);
319 usleep_range(10, 20);
320
321 /* wait till reset is 1 */
322 SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
323 BCMA_RESET_CTL_RESET, 300);
324
325 /* in-reset configure */
326 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
327 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
328 ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
329}
330
331static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
332 u32 reset, u32 postreset)
333{
334 struct brcmf_chip_priv *ci;
335 u32 regdata;
336 u32 base;
337
338 ci = core->chip;
339 base = core->pub.base;
340 /*
341 * Must do the disable sequence first to work for
342 * arbitrary current core state.
343 */
344 brcmf_chip_sb_coredisable(core, 0, 0);
345
346 /*
347 * Now do the initialization sequence.
348 * set reset while enabling the clock and
349 * forcing them on throughout the core
350 */
351 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
352 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
353 SSB_TMSLOW_RESET);
354 regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
355 udelay(1);
356
357 /* clear any serror */
358 regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
359 if (regdata & SSB_TMSHIGH_SERR)
360 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
361
362 regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
363 if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
364 regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
365 ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
366 }
367
368 /* clear reset and allow it to propagate throughout the core */
369 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
370 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
371 regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
372 udelay(1);
373
374 /* leave clock enabled */
375 ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
376 SSB_TMSLOW_CLOCK);
377 regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
378 udelay(1);
379}
380
381static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
382 u32 reset, u32 postreset)
383{
384 struct brcmf_chip_priv *ci;
385 int count;
386
387 ci = core->chip;
388
389 /* must disable first to work for arbitrary current core state */
390 brcmf_chip_ai_coredisable(core, prereset, reset);
391
392 count = 0;
393 while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
394 BCMA_RESET_CTL_RESET) {
395 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
396 count++;
397 if (count > 50)
398 break;
399 usleep_range(40, 60);
400 }
401
402 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
403 postreset | BCMA_IOCTL_CLK);
404 ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
405}
406
407static char *brcmf_chip_name(uint chipid, char *buf, uint len)
408{
409 const char *fmt;
410
411 fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
412 snprintf(buf, len, fmt, chipid);
413 return buf;
414}
415
416static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
417 u16 coreid, u32 base,
418 u32 wrapbase)
419{
420 struct brcmf_core_priv *core;
421
422 core = kzalloc(sizeof(*core), GFP_KERNEL);
423 if (!core)
424 return ERR_PTR(-ENOMEM);
425
426 core->pub.id = coreid;
427 core->pub.base = base;
428 core->chip = ci;
429 core->wrapbase = wrapbase;
430
431 list_add_tail(&core->list, &ci->cores);
432 return &core->pub;
433}
434
435#ifdef DEBUG
436/* safety check for chipinfo */
437static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
438{
439 struct brcmf_core_priv *core;
440 bool need_socram = false;
441 bool has_socram = false;
442 int idx = 1;
443
444 list_for_each_entry(core, &ci->cores, list) {
445 brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
446 idx++, core->pub.id, core->pub.rev, core->pub.base,
447 core->wrapbase);
448
449 switch (core->pub.id) {
450 case BCMA_CORE_ARM_CM3:
451 need_socram = true;
452 break;
453 case BCMA_CORE_INTERNAL_MEM:
454 has_socram = true;
455 break;
456 case BCMA_CORE_ARM_CR4:
457 if (ci->pub.rambase == 0) {
458 brcmf_err("RAM base not provided with ARM CR4 core\n");
459 return -ENOMEM;
460 }
461 break;
462 default:
463 break;
464 }
465 }
466
467 /* check RAM core presence for ARM CM3 core */
468 if (need_socram && !has_socram) {
469 brcmf_err("RAM core not provided with ARM CM3 core\n");
470 return -ENODEV;
471 }
472 return 0;
473}
474#else /* DEBUG */
475static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
476{
477 return 0;
478}
479#endif
480
481static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
482{
483 switch (ci->pub.chip) {
484 case BCM4329_CHIP_ID:
485 ci->pub.ramsize = BCM4329_RAMSIZE;
486 break;
487 case BCM43143_CHIP_ID:
488 ci->pub.ramsize = BCM43143_RAMSIZE;
489 break;
490 case BCM43241_CHIP_ID:
491 ci->pub.ramsize = 0x90000;
492 break;
493 case BCM4330_CHIP_ID:
494 ci->pub.ramsize = 0x48000;
495 break;
496 case BCM4334_CHIP_ID:
497 ci->pub.ramsize = 0x80000;
498 break;
499 case BCM4335_CHIP_ID:
500 ci->pub.ramsize = 0xc0000;
501 ci->pub.rambase = 0x180000;
502 break;
503 case BCM43362_CHIP_ID:
504 ci->pub.ramsize = 0x3c000;
505 break;
506 case BCM4339_CHIP_ID:
507 case BCM4354_CHIP_ID:
508 ci->pub.ramsize = 0xc0000;
509 ci->pub.rambase = 0x180000;
510 break;
511 default:
512 brcmf_err("unknown chip: %s\n", ci->pub.name);
513 break;
514 }
515}
516
517static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
518 u8 *type)
519{
520 u32 val;
521
522 /* read next descriptor */
523 val = ci->ops->read32(ci->ctx, *eromaddr);
524 *eromaddr += 4;
525
526 if (!type)
527 return val;
528
529 /* determine descriptor type */
530 *type = (val & DMP_DESC_TYPE_MSK);
531 if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
532 *type = DMP_DESC_ADDRESS;
533
534 return val;
535}
536
537static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
538 u32 *regbase, u32 *wrapbase)
539{
540 u8 desc;
541 u32 val;
542 u8 mpnum = 0;
543 u8 stype, sztype, wraptype;
544
545 *regbase = 0;
546 *wrapbase = 0;
547
548 val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
549 if (desc == DMP_DESC_MASTER_PORT) {
550 mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
551 wraptype = DMP_SLAVE_TYPE_MWRAP;
552 } else if (desc == DMP_DESC_ADDRESS) {
553 /* revert erom address */
554 *eromaddr -= 4;
555 wraptype = DMP_SLAVE_TYPE_SWRAP;
556 } else {
557 *eromaddr -= 4;
558 return -EILSEQ;
559 }
560
561 do {
562 /* locate address descriptor */
563 do {
564 val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
565 /* unexpected table end */
566 if (desc == DMP_DESC_EOT) {
567 *eromaddr -= 4;
568 return -EFAULT;
569 }
570 } while (desc != DMP_DESC_ADDRESS);
571
572 /* skip upper 32-bit address descriptor */
573 if (val & DMP_DESC_ADDRSIZE_GT32)
574 brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
575
576 sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
577
578 /* next size descriptor can be skipped */
579 if (sztype == DMP_SLAVE_SIZE_DESC) {
580 val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
581 /* skip upper size descriptor if present */
582 if (val & DMP_DESC_ADDRSIZE_GT32)
583 brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
584 }
585
586 /* only look for 4K register regions */
587 if (sztype != DMP_SLAVE_SIZE_4K)
588 continue;
589
590 stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
591
592 /* only regular slave and wrapper */
593 if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
594 *regbase = val & DMP_SLAVE_ADDR_BASE;
595 if (*wrapbase == 0 && stype == wraptype)
596 *wrapbase = val & DMP_SLAVE_ADDR_BASE;
597 } while (*regbase == 0 || *wrapbase == 0);
598
599 return 0;
600}
601
602static
603int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
604{
605 struct brcmf_core *core;
606 u32 eromaddr;
607 u8 desc_type = 0;
608 u32 val;
609 u16 id;
610 u8 nmp, nsp, nmw, nsw, rev;
611 u32 base, wrap;
612 int err;
613
614 eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
615
616 while (desc_type != DMP_DESC_EOT) {
617 val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
618 if (!(val & DMP_DESC_VALID))
619 continue;
620
621 if (desc_type == DMP_DESC_EMPTY)
622 continue;
623
624 /* need a component descriptor */
625 if (desc_type != DMP_DESC_COMPONENT)
626 continue;
627
628 id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
629
630 /* next descriptor must be component as well */
631 val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
632 if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
633 return -EFAULT;
634
635 /* only look at cores with master port(s) */
636 nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
637 nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
638 nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
639 nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
640 rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
641
642 /* need core with ports */
643 if (nmw + nsw == 0)
644 continue;
645
646 /* try to obtain register address info */
647 err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
648 if (err)
649 continue;
650
651 /* finally a core to be added */
652 core = brcmf_chip_add_core(ci, id, base, wrap);
653 if (IS_ERR(core))
654 return PTR_ERR(core);
655
656 core->rev = rev;
657 }
658
659 return 0;
660}
661
662static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
663{
664 struct brcmf_core *core;
665 u32 regdata;
666 u32 socitype;
667
668 /* Get CC core rev
669 * Chipid is assume to be at offset 0 from SI_ENUM_BASE
670 * For different chiptypes or old sdio hosts w/o chipcommon,
671 * other ways of recognition should be added here.
672 */
673 regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
674 ci->pub.chip = regdata & CID_ID_MASK;
675 ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
676 socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
677
678 brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
679 brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
680 socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
681 ci->pub.chiprev);
682
683 if (socitype == SOCI_SB) {
684 if (ci->pub.chip != BCM4329_CHIP_ID) {
685 brcmf_err("SB chip is not supported\n");
686 return -ENODEV;
687 }
688 ci->iscoreup = brcmf_chip_sb_iscoreup;
689 ci->coredisable = brcmf_chip_sb_coredisable;
690 ci->resetcore = brcmf_chip_sb_resetcore;
691
692 core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
693 SI_ENUM_BASE, 0);
694 brcmf_chip_sb_corerev(ci, core);
695 core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
696 BCM4329_CORE_BUS_BASE, 0);
697 brcmf_chip_sb_corerev(ci, core);
698 core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
699 BCM4329_CORE_SOCRAM_BASE, 0);
700 brcmf_chip_sb_corerev(ci, core);
701 core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
702 BCM4329_CORE_ARM_BASE, 0);
703 brcmf_chip_sb_corerev(ci, core);
704
705 core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
706 brcmf_chip_sb_corerev(ci, core);
707 } else if (socitype == SOCI_AI) {
708 ci->iscoreup = brcmf_chip_ai_iscoreup;
709 ci->coredisable = brcmf_chip_ai_coredisable;
710 ci->resetcore = brcmf_chip_ai_resetcore;
711
712 brcmf_chip_dmp_erom_scan(ci);
713 } else {
714 brcmf_err("chip backplane type %u is not supported\n",
715 socitype);
716 return -ENODEV;
717 }
718
719 brcmf_chip_get_raminfo(ci);
720
721 return brcmf_chip_cores_check(ci);
722}
723
724static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
725{
726 struct brcmf_core *core;
727 struct brcmf_core_priv *cr4;
728 u32 val;
729
730
731 core = brcmf_chip_get_core(&chip->pub, id);
732 if (!core)
733 return;
734
735 switch (id) {
736 case BCMA_CORE_ARM_CM3:
737 brcmf_chip_coredisable(core, 0, 0);
738 break;
739 case BCMA_CORE_ARM_CR4:
740 cr4 = container_of(core, struct brcmf_core_priv, pub);
741
742 /* clear all IOCTL bits except HALT bit */
743 val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
744 val &= ARMCR4_BCMA_IOCTL_CPUHALT;
745 brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
746 ARMCR4_BCMA_IOCTL_CPUHALT);
747 break;
748 default:
749 brcmf_err("unknown id: %u\n", id);
750 break;
751 }
752}
753
754static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
755{
756 struct brcmf_chip *pub;
757 struct brcmf_core_priv *cc;
758 u32 base;
759 u32 val;
760 int ret = 0;
761
762 pub = &chip->pub;
763 cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
764 base = cc->pub.base;
765
766 /* get chipcommon capabilites */
767 pub->cc_caps = chip->ops->read32(chip->ctx,
768 CORE_CC_REG(base, capabilities));
769
770 /* get pmu caps & rev */
771 if (pub->cc_caps & CC_CAP_PMU) {
772 val = chip->ops->read32(chip->ctx,
773 CORE_CC_REG(base, pmucapabilities));
774 pub->pmurev = val & PCAP_REV_MASK;
775 pub->pmucaps = val;
776 }
777
778 brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
779 cc->pub.rev, pub->pmurev, pub->pmucaps);
780
781 /* execute bus core specific setup */
782 if (chip->ops->setup)
783 ret = chip->ops->setup(chip->ctx, pub);
784
785 /*
786 * Make sure any on-chip ARM is off (in case strapping is wrong),
787 * or downloaded code was already running.
788 */
789 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
790 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
791 return ret;
792}
793
794struct brcmf_chip *brcmf_chip_attach(void *ctx,
795 const struct brcmf_buscore_ops *ops)
796{
797 struct brcmf_chip_priv *chip;
798 int err = 0;
799
800 if (WARN_ON(!ops->read32))
801 err = -EINVAL;
802 if (WARN_ON(!ops->write32))
803 err = -EINVAL;
804 if (WARN_ON(!ops->prepare))
805 err = -EINVAL;
806 if (WARN_ON(!ops->exit_dl))
807 err = -EINVAL;
808 if (err < 0)
809 return ERR_PTR(-EINVAL);
810
811 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
812 if (!chip)
813 return ERR_PTR(-ENOMEM);
814
815 INIT_LIST_HEAD(&chip->cores);
816 chip->num_cores = 0;
817 chip->ops = ops;
818 chip->ctx = ctx;
819
820 err = ops->prepare(ctx);
821 if (err < 0)
822 goto fail;
823
824 err = brcmf_chip_recognition(chip);
825 if (err < 0)
826 goto fail;
827
828 err = brcmf_chip_setup(chip);
829 if (err < 0)
830 goto fail;
831
832 return &chip->pub;
833
834fail:
835 brcmf_chip_detach(&chip->pub);
836 return ERR_PTR(err);
837}
838
839void brcmf_chip_detach(struct brcmf_chip *pub)
840{
841 struct brcmf_chip_priv *chip;
842 struct brcmf_core_priv *core;
843 struct brcmf_core_priv *tmp;
844
845 chip = container_of(pub, struct brcmf_chip_priv, pub);
846 list_for_each_entry_safe(core, tmp, &chip->cores, list) {
847 list_del(&core->list);
848 kfree(core);
849 }
850 kfree(chip);
851}
852
853struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
854{
855 struct brcmf_chip_priv *chip;
856 struct brcmf_core_priv *core;
857
858 chip = container_of(pub, struct brcmf_chip_priv, pub);
859 list_for_each_entry(core, &chip->cores, list)
860 if (core->pub.id == coreid)
861 return &core->pub;
862
863 return NULL;
864}
865
866struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
867{
868 struct brcmf_chip_priv *chip;
869 struct brcmf_core_priv *cc;
870
871 chip = container_of(pub, struct brcmf_chip_priv, pub);
872 cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
873 if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
874 return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
875 return &cc->pub;
876}
877
878bool brcmf_chip_iscoreup(struct brcmf_core *pub)
879{
880 struct brcmf_core_priv *core;
881
882 core = container_of(pub, struct brcmf_core_priv, pub);
883 return core->chip->iscoreup(core);
884}
885
886void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
887{
888 struct brcmf_core_priv *core;
889
890 core = container_of(pub, struct brcmf_core_priv, pub);
891 core->chip->coredisable(core, prereset, reset);
892}
893
894void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
895 u32 postreset)
896{
897 struct brcmf_core_priv *core;
898
899 core = container_of(pub, struct brcmf_core_priv, pub);
900 core->chip->resetcore(core, prereset, reset, postreset);
901}
902
903static void
904brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
905{
906 struct brcmf_core *core;
907
908 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
909 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
910 brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
911 D11_BCMA_IOCTL_PHYCLOCKEN,
912 D11_BCMA_IOCTL_PHYCLOCKEN,
913 D11_BCMA_IOCTL_PHYCLOCKEN);
914 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
915 brcmf_chip_resetcore(core, 0, 0, 0);
916}
917
918static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
919{
920 struct brcmf_core *core;
921
922 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
923 if (!brcmf_chip_iscoreup(core)) {
924 brcmf_err("SOCRAM core is down after reset?\n");
925 return false;
926 }
927
928 chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
929
930 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
931 brcmf_chip_resetcore(core, 0, 0, 0);
932
933 return true;
934}
935
936static inline void
937brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
938{
939 struct brcmf_core *core;
940
941 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
942
943 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
944 brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
945 D11_BCMA_IOCTL_PHYCLOCKEN,
946 D11_BCMA_IOCTL_PHYCLOCKEN,
947 D11_BCMA_IOCTL_PHYCLOCKEN);
948}
949
950static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
951{
952 struct brcmf_core *core;
953
954 chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
955
956 /* restore ARM */
957 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
958 brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
959
960 return true;
961}
962
963void brcmf_chip_enter_download(struct brcmf_chip *pub)
964{
965 struct brcmf_chip_priv *chip;
966 struct brcmf_core *arm;
967
968 brcmf_dbg(TRACE, "Enter\n");
969
970 chip = container_of(pub, struct brcmf_chip_priv, pub);
971 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
972 if (arm) {
973 brcmf_chip_cr4_enterdl(chip);
974 return;
975 }
976
977 brcmf_chip_cm3_enterdl(chip);
978}
979
980bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
981{
982 struct brcmf_chip_priv *chip;
983 struct brcmf_core *arm;
984
985 brcmf_dbg(TRACE, "Enter\n");
986
987 chip = container_of(pub, struct brcmf_chip_priv, pub);
988 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
989 if (arm)
990 return brcmf_chip_cr4_exitdl(chip, rstvec);
991
992 return brcmf_chip_cm3_exitdl(chip);
993}
994
995bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
996{
997 u32 base, addr, reg, pmu_cc3_mask = ~0;
998 struct brcmf_chip_priv *chip;
999
1000 brcmf_dbg(TRACE, "Enter\n");
1001
1002 /* old chips with PMU version less than 17 don't support save restore */
1003 if (pub->pmurev < 17)
1004 return false;
1005
1006 base = brcmf_chip_get_chipcommon(pub)->base;
1007 chip = container_of(pub, struct brcmf_chip_priv, pub);
1008
1009 switch (pub->chip) {
1010 case BCM4354_CHIP_ID:
1011 /* explicitly check SR engine enable bit */
1012 pmu_cc3_mask = BIT(2);
1013 /* fall-through */
1014 case BCM43241_CHIP_ID:
1015 case BCM4335_CHIP_ID:
1016 case BCM4339_CHIP_ID:
1017 /* read PMU chipcontrol register 3 */
1018 addr = CORE_CC_REG(base, chipcontrol_addr);
1019 chip->ops->write32(chip->ctx, addr, 3);
1020 addr = CORE_CC_REG(base, chipcontrol_data);
1021 reg = chip->ops->read32(chip->ctx, addr);
1022 return (reg & pmu_cc3_mask) != 0;
1023 default:
1024 addr = CORE_CC_REG(base, pmucapabilities_ext);
1025 reg = chip->ops->read32(chip->ctx, addr);
1026 if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
1027 return false;
1028
1029 addr = CORE_CC_REG(base, retention_ctl);
1030 reg = chip->ops->read32(chip->ctx, addr);
1031 return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
1032 PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
1033 }
1034}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644
index 000000000000..c32908da90c8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2014 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef BRCMF_CHIP_H
17#define BRCMF_CHIP_H
18
19#include <linux/types.h>
20
21#define CORE_CC_REG(base, field) \
22 (base + offsetof(struct chipcregs, field))
23
24/**
25 * struct brcmf_chip - chip level information.
26 *
27 * @chip: chip identifier.
28 * @chiprev: chip revision.
29 * @cc_caps: chipcommon core capabilities.
30 * @pmucaps: PMU capabilities.
31 * @pmurev: PMU revision.
32 * @rambase: RAM base address (only applicable for ARM CR4 chips).
33 * @ramsize: amount of RAM on chip.
34 * @name: string representation of the chip identifier.
35 */
36struct brcmf_chip {
37 u32 chip;
38 u32 chiprev;
39 u32 cc_caps;
40 u32 pmucaps;
41 u32 pmurev;
42 u32 rambase;
43 u32 ramsize;
44 char name[8];
45};
46
47/**
48 * struct brcmf_core - core related information.
49 *
50 * @id: core identifier.
51 * @rev: core revision.
52 * @base: base address of core register space.
53 */
54struct brcmf_core {
55 u16 id;
56 u16 rev;
57 u32 base;
58};
59
60/**
61 * struct brcmf_buscore_ops - buscore specific callbacks.
62 *
63 * @read32: read 32-bit value over bus.
64 * @write32: write 32-bit value over bus.
65 * @prepare: prepare bus for core configuration.
66 * @setup: bus-specific core setup.
67 * @exit_dl: exit download state.
68 * The callback should use the provided @rstvec when non-zero.
69 */
70struct brcmf_buscore_ops {
71 u32 (*read32)(void *ctx, u32 addr);
72 void (*write32)(void *ctx, u32 addr, u32 value);
73 int (*prepare)(void *ctx);
74 int (*setup)(void *ctx, struct brcmf_chip *chip);
75 void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
76};
77
78struct brcmf_chip *brcmf_chip_attach(void *ctx,
79 const struct brcmf_buscore_ops *ops);
80void brcmf_chip_detach(struct brcmf_chip *chip);
81struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
82struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
83bool brcmf_chip_iscoreup(struct brcmf_core *core);
84void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
85void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
86 u32 postreset);
87void brcmf_chip_enter_download(struct brcmf_chip *ci);
88bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
89bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
90
91#endif /* BRCMF_AXIDMP_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d4d966beb840..7d28cd385092 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1040,12 +1040,12 @@ void brcmf_detach(struct device *dev)
1040 1040
1041 brcmf_cfg80211_detach(drvr->config); 1041 brcmf_cfg80211_detach(drvr->config);
1042 1042
1043 brcmf_fws_deinit(drvr);
1044
1043 brcmf_bus_detach(drvr); 1045 brcmf_bus_detach(drvr);
1044 1046
1045 brcmf_proto_detach(drvr); 1047 brcmf_proto_detach(drvr);
1046 1048
1047 brcmf_fws_deinit(drvr);
1048
1049 brcmf_debugfs_detach(drvr); 1049 brcmf_debugfs_detach(drvr);
1050 bus_if->drvr = NULL; 1050 bus_if->drvr = NULL;
1051 kfree(drvr); 1051 kfree(drvr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index ddaa9efd053d..13c89a0c4ba7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/mmc/sdio.h> 25#include <linux/mmc/sdio.h>
26#include <linux/mmc/sdio_ids.h>
26#include <linux/mmc/sdio_func.h> 27#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/card.h> 28#include <linux/mmc/card.h>
28#include <linux/semaphore.h> 29#include <linux/semaphore.h>
@@ -40,7 +41,7 @@
40#include <brcm_hw_ids.h> 41#include <brcm_hw_ids.h>
41#include <soc.h> 42#include <soc.h>
42#include "sdio_host.h" 43#include "sdio_host.h"
43#include "sdio_chip.h" 44#include "chip.h"
44#include "nvram.h" 45#include "nvram.h"
45 46
46#define DCMD_RESP_TIMEOUT 2000 /* In milli second */ 47#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -112,8 +113,6 @@ struct rte_console {
112#define BRCMF_TXBOUND 20 /* Default for max tx frames in 113#define BRCMF_TXBOUND 20 /* Default for max tx frames in
113 one scheduling */ 114 one scheduling */
114 115
115#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
116
117#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */ 116#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
118 117
119#define MEMBLOCK 2048 /* Block size used for downloading 118#define MEMBLOCK 2048 /* Block size used for downloading
@@ -156,6 +155,34 @@ struct rte_console {
156/* manfid tuple length, include tuple, link bytes */ 155/* manfid tuple length, include tuple, link bytes */
157#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 156#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
158 157
158#define CORE_BUS_REG(base, field) \
159 (base + offsetof(struct sdpcmd_regs, field))
160
161/* SDIO function 1 register CHIPCLKCSR */
162/* Force ALP request to backplane */
163#define SBSDIO_FORCE_ALP 0x01
164/* Force HT request to backplane */
165#define SBSDIO_FORCE_HT 0x02
166/* Force ILP request to backplane */
167#define SBSDIO_FORCE_ILP 0x04
168/* Make ALP ready (power up xtal) */
169#define SBSDIO_ALP_AVAIL_REQ 0x08
170/* Make HT ready (power up PLL) */
171#define SBSDIO_HT_AVAIL_REQ 0x10
172/* Squelch clock requests from HW */
173#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
174/* Status: ALP is ready */
175#define SBSDIO_ALP_AVAIL 0x40
176/* Status: HT is ready */
177#define SBSDIO_HT_AVAIL 0x80
178#define SBSDIO_CSR_MASK 0x1F
179#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
180#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
181#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
182#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
183#define SBSDIO_CLKAV(regval, alponly) \
184 (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
185
159/* intstatus */ 186/* intstatus */
160#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ 187#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
161#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ 188#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
@@ -276,7 +303,6 @@ struct rte_console {
276/* Flags for SDH calls */ 303/* Flags for SDH calls */
277#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) 304#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
278 305
279#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
280#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change 306#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
281 * when idle 307 * when idle
282 */ 308 */
@@ -433,10 +459,11 @@ struct brcmf_sdio {
433 bool alp_only; /* Don't use HT clock (ALP only) */ 459 bool alp_only; /* Don't use HT clock (ALP only) */
434 460
435 u8 *ctrl_frame_buf; 461 u8 *ctrl_frame_buf;
436 u32 ctrl_frame_len; 462 u16 ctrl_frame_len;
437 bool ctrl_frame_stat; 463 bool ctrl_frame_stat;
438 464
439 spinlock_t txqlock; 465 spinlock_t txq_lock; /* protect bus->txq */
466 struct semaphore tx_seq_lock; /* protect bus->tx_seq */
440 wait_queue_head_t ctrl_wait; 467 wait_queue_head_t ctrl_wait;
441 wait_queue_head_t dcmd_resp_wait; 468 wait_queue_head_t dcmd_resp_wait;
442 469
@@ -483,16 +510,58 @@ static const uint max_roundup = 512;
483 510
484#define ALIGNMENT 4 511#define ALIGNMENT 4
485 512
486static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
487module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
488MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
489
490enum brcmf_sdio_frmtype { 513enum brcmf_sdio_frmtype {
491 BRCMF_SDIO_FT_NORMAL, 514 BRCMF_SDIO_FT_NORMAL,
492 BRCMF_SDIO_FT_SUPER, 515 BRCMF_SDIO_FT_SUPER,
493 BRCMF_SDIO_FT_SUB, 516 BRCMF_SDIO_FT_SUB,
494}; 517};
495 518
519#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
520
521/* SDIO Pad drive strength to select value mappings */
522struct sdiod_drive_str {
523 u8 strength; /* Pad Drive Strength in mA */
524 u8 sel; /* Chip-specific select value */
525};
526
527/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
528static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
529 {32, 0x6},
530 {26, 0x7},
531 {22, 0x4},
532 {16, 0x5},
533 {12, 0x2},
534 {8, 0x3},
535 {4, 0x0},
536 {0, 0x1}
537};
538
539/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
540static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
541 {6, 0x7},
542 {5, 0x6},
543 {4, 0x5},
544 {3, 0x4},
545 {2, 0x2},
546 {1, 0x1},
547 {0, 0x0}
548};
549
550/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
551static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
552 {3, 0x3},
553 {2, 0x2},
554 {1, 0x1},
555 {0, 0x0} };
556
557/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
558static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
559 {16, 0x7},
560 {12, 0x5},
561 {8, 0x3},
562 {4, 0x1}
563};
564
496#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin" 565#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
497#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt" 566#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
498#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin" 567#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
@@ -511,6 +580,8 @@ enum brcmf_sdio_frmtype {
511#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt" 580#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
512#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin" 581#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
513#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt" 582#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
583#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
584#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
514 585
515MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME); 586MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
516MODULE_FIRMWARE(BCM43143_NVRAM_NAME); 587MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -530,6 +601,8 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
530MODULE_FIRMWARE(BCM43362_NVRAM_NAME); 601MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
531MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME); 602MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
532MODULE_FIRMWARE(BCM4339_NVRAM_NAME); 603MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
604MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
605MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
533 606
534struct brcmf_firmware_names { 607struct brcmf_firmware_names {
535 u32 chipid; 608 u32 chipid;
@@ -555,7 +628,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
555 { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) }, 628 { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
556 { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }, 629 { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
557 { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) }, 630 { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
558 { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) } 631 { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
632 { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
559}; 633};
560 634
561 635
@@ -618,27 +692,24 @@ static bool data_ok(struct brcmf_sdio *bus)
618 * Reads a register in the SDIO hardware block. This block occupies a series of 692 * Reads a register in the SDIO hardware block. This block occupies a series of
619 * adresses on the 32 bit backplane bus. 693 * adresses on the 32 bit backplane bus.
620 */ 694 */
621static int 695static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
622r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
623{ 696{
624 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 697 struct brcmf_core *core;
625 int ret; 698 int ret;
626 699
627 *regvar = brcmf_sdiod_regrl(bus->sdiodev, 700 core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
628 bus->ci->c_inf[idx].base + offset, &ret); 701 *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
629 702
630 return ret; 703 return ret;
631} 704}
632 705
633static int 706static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
634w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
635{ 707{
636 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 708 struct brcmf_core *core;
637 int ret; 709 int ret;
638 710
639 brcmf_sdiod_regwl(bus->sdiodev, 711 core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
640 bus->ci->c_inf[idx].base + reg_offset, 712 brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
641 regval, &ret);
642 713
643 return ret; 714 return ret;
644} 715}
@@ -650,16 +721,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
650 int err = 0; 721 int err = 0;
651 int try_cnt = 0; 722 int try_cnt = 0;
652 723
653 brcmf_dbg(TRACE, "Enter\n"); 724 brcmf_dbg(TRACE, "Enter: on=%d\n", on);
654 725
655 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); 726 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
656 /* 1st KSO write goes to AOS wake up core if device is asleep */ 727 /* 1st KSO write goes to AOS wake up core if device is asleep */
657 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, 728 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
658 wr_val, &err); 729 wr_val, &err);
659 if (err) {
660 brcmf_err("SDIO_AOS KSO write error: %d\n", err);
661 return err;
662 }
663 730
664 if (on) { 731 if (on) {
665 /* device WAKEUP through KSO: 732 /* device WAKEUP through KSO:
@@ -689,18 +756,22 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
689 &err); 756 &err);
690 if (((rd_val & bmask) == cmp_val) && !err) 757 if (((rd_val & bmask) == cmp_val) && !err)
691 break; 758 break;
692 brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n", 759
693 try_cnt, MAX_KSO_ATTEMPTS, err);
694 udelay(KSO_WAIT_US); 760 udelay(KSO_WAIT_US);
695 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, 761 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
696 wr_val, &err); 762 wr_val, &err);
697 } while (try_cnt++ < MAX_KSO_ATTEMPTS); 763 } while (try_cnt++ < MAX_KSO_ATTEMPTS);
698 764
765 if (try_cnt > 2)
766 brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
767 rd_val, err);
768
769 if (try_cnt > MAX_KSO_ATTEMPTS)
770 brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
771
699 return err; 772 return err;
700} 773}
701 774
702#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
703
704#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) 775#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
705 776
706/* Turn backplane clock on or off */ 777/* Turn backplane clock on or off */
@@ -799,7 +870,6 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
799 } 870 }
800#endif /* defined (DEBUG) */ 871#endif /* defined (DEBUG) */
801 872
802 bus->activity = true;
803 } else { 873 } else {
804 clkreq = 0; 874 clkreq = 0;
805 875
@@ -899,8 +969,9 @@ static int
899brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok) 969brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
900{ 970{
901 int err = 0; 971 int err = 0;
902 brcmf_dbg(TRACE, "Enter\n"); 972 u8 clkcsr;
903 brcmf_dbg(SDIO, "request %s currently %s\n", 973
974 brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
904 (sleep ? "SLEEP" : "WAKE"), 975 (sleep ? "SLEEP" : "WAKE"),
905 (bus->sleeping ? "SLEEP" : "WAKE")); 976 (bus->sleeping ? "SLEEP" : "WAKE"));
906 977
@@ -917,8 +988,20 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
917 atomic_read(&bus->ipend) > 0 || 988 atomic_read(&bus->ipend) > 0 ||
918 (!atomic_read(&bus->fcstate) && 989 (!atomic_read(&bus->fcstate) &&
919 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && 990 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
920 data_ok(bus))) 991 data_ok(bus))) {
921 return -EBUSY; 992 err = -EBUSY;
993 goto done;
994 }
995
996 clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
997 SBSDIO_FUNC1_CHIPCLKCSR,
998 &err);
999 if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
1000 brcmf_dbg(SDIO, "no clock, set ALP\n");
1001 brcmf_sdiod_regwb(bus->sdiodev,
1002 SBSDIO_FUNC1_CHIPCLKCSR,
1003 SBSDIO_ALP_AVAIL_REQ, &err);
1004 }
922 err = brcmf_sdio_kso_control(bus, false); 1005 err = brcmf_sdio_kso_control(bus, false);
923 /* disable watchdog */ 1006 /* disable watchdog */
924 if (!err) 1007 if (!err)
@@ -935,7 +1018,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
935 } else { 1018 } else {
936 brcmf_err("error while changing bus sleep state %d\n", 1019 brcmf_err("error while changing bus sleep state %d\n",
937 err); 1020 err);
938 return err; 1021 goto done;
939 } 1022 }
940 } 1023 }
941 1024
@@ -947,11 +1030,92 @@ end:
947 } else { 1030 } else {
948 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); 1031 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
949 } 1032 }
950 1033done:
1034 brcmf_dbg(SDIO, "Exit: err=%d\n", err);
951 return err; 1035 return err;
952 1036
953} 1037}
954 1038
1039#ifdef DEBUG
1040static inline bool brcmf_sdio_valid_shared_address(u32 addr)
1041{
1042 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
1043}
1044
1045static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1046 struct sdpcm_shared *sh)
1047{
1048 u32 addr;
1049 int rv;
1050 u32 shaddr = 0;
1051 struct sdpcm_shared_le sh_le;
1052 __le32 addr_le;
1053
1054 shaddr = bus->ci->rambase + bus->ramsize - 4;
1055
1056 /*
1057 * Read last word in socram to determine
1058 * address of sdpcm_shared structure
1059 */
1060 sdio_claim_host(bus->sdiodev->func[1]);
1061 brcmf_sdio_bus_sleep(bus, false, false);
1062 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
1063 sdio_release_host(bus->sdiodev->func[1]);
1064 if (rv < 0)
1065 return rv;
1066
1067 addr = le32_to_cpu(addr_le);
1068
1069 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
1070
1071 /*
1072 * Check if addr is valid.
1073 * NVRAM length at the end of memory should have been overwritten.
1074 */
1075 if (!brcmf_sdio_valid_shared_address(addr)) {
1076 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
1077 addr);
1078 return -EINVAL;
1079 }
1080
1081 /* Read hndrte_shared structure */
1082 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
1083 sizeof(struct sdpcm_shared_le));
1084 if (rv < 0)
1085 return rv;
1086
1087 /* Endianness */
1088 sh->flags = le32_to_cpu(sh_le.flags);
1089 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
1090 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
1091 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
1092 sh->assert_line = le32_to_cpu(sh_le.assert_line);
1093 sh->console_addr = le32_to_cpu(sh_le.console_addr);
1094 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
1095
1096 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
1097 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
1098 SDPCM_SHARED_VERSION,
1099 sh->flags & SDPCM_SHARED_VERSION_MASK);
1100 return -EPROTO;
1101 }
1102
1103 return 0;
1104}
1105
1106static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1107{
1108 struct sdpcm_shared sh;
1109
1110 if (brcmf_sdio_readshared(bus, &sh) == 0)
1111 bus->console_addr = sh.console_addr;
1112}
1113#else
1114static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1115{
1116}
1117#endif /* DEBUG */
1118
955static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) 1119static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
956{ 1120{
957 u32 intstatus = 0; 1121 u32 intstatus = 0;
@@ -995,6 +1159,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
995 else 1159 else
996 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n", 1160 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
997 bus->sdpcm_ver); 1161 bus->sdpcm_ver);
1162
1163 /*
1164 * Retrieve console state address now that firmware should have
1165 * updated it.
1166 */
1167 brcmf_sdio_get_console_addr(bus);
998 } 1168 }
999 1169
1000 /* 1170 /*
@@ -1083,6 +1253,28 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1083 bus->cur_read.len = 0; 1253 bus->cur_read.len = 0;
1084} 1254}
1085 1255
1256static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
1257{
1258 struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
1259 u8 i, hi, lo;
1260
1261 /* On failure, abort the command and terminate the frame */
1262 brcmf_err("sdio error, abort command and terminate frame\n");
1263 bus->sdcnt.tx_sderrs++;
1264
1265 brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
1266 brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
1267 bus->sdcnt.f1regdata++;
1268
1269 for (i = 0; i < 3; i++) {
1270 hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1271 lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1272 bus->sdcnt.f1regdata += 2;
1273 if ((hi == 0) && (lo == 0))
1274 break;
1275 }
1276}
1277
1086/* return total length of buffer chain */ 1278/* return total length of buffer chain */
1087static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus) 1279static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
1088{ 1280{
@@ -1955,7 +2147,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
1955 memcpy(pkt_pad->data, 2147 memcpy(pkt_pad->data,
1956 pkt->data + pkt->len - tail_chop, 2148 pkt->data + pkt->len - tail_chop,
1957 tail_chop); 2149 tail_chop);
1958 *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; 2150 *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
1959 skb_trim(pkt, pkt->len - tail_chop); 2151 skb_trim(pkt, pkt->len - tail_chop);
1960 skb_trim(pkt_pad, tail_pad + tail_chop); 2152 skb_trim(pkt_pad, tail_pad + tail_chop);
1961 __skb_queue_after(pktq, pkt, pkt_pad); 2153 __skb_queue_after(pktq, pkt, pkt_pad);
@@ -2003,7 +2195,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2003 * already properly aligned and does not 2195 * already properly aligned and does not
2004 * need an sdpcm header. 2196 * need an sdpcm header.
2005 */ 2197 */
2006 if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG) 2198 if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
2007 continue; 2199 continue;
2008 2200
2009 /* align packet data pointer */ 2201 /* align packet data pointer */
@@ -2037,10 +2229,10 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2037 if (BRCMF_BYTES_ON() && 2229 if (BRCMF_BYTES_ON() &&
2038 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) || 2230 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
2039 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL))) 2231 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
2040 brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, 2232 brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
2041 "Tx Frame:\n"); 2233 "Tx Frame:\n");
2042 else if (BRCMF_HDRS_ON()) 2234 else if (BRCMF_HDRS_ON())
2043 brcmf_dbg_hex_dump(true, pkt_next, 2235 brcmf_dbg_hex_dump(true, pkt_next->data,
2044 head_pad + bus->tx_hdrlen, 2236 head_pad + bus->tx_hdrlen,
2045 "Tx Header:\n"); 2237 "Tx Header:\n");
2046 } 2238 }
@@ -2067,11 +2259,11 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2067 u8 *hdr; 2259 u8 *hdr;
2068 u32 dat_offset; 2260 u32 dat_offset;
2069 u16 tail_pad; 2261 u16 tail_pad;
2070 u32 dummy_flags, chop_len; 2262 u16 dummy_flags, chop_len;
2071 struct sk_buff *pkt_next, *tmp, *pkt_prev; 2263 struct sk_buff *pkt_next, *tmp, *pkt_prev;
2072 2264
2073 skb_queue_walk_safe(pktq, pkt_next, tmp) { 2265 skb_queue_walk_safe(pktq, pkt_next, tmp) {
2074 dummy_flags = *(u32 *)(pkt_next->cb); 2266 dummy_flags = *(u16 *)(pkt_next->cb);
2075 if (dummy_flags & ALIGN_SKB_FLAG) { 2267 if (dummy_flags & ALIGN_SKB_FLAG) {
2076 chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK; 2268 chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2077 if (chop_len) { 2269 if (chop_len) {
@@ -2100,7 +2292,6 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2100 uint chan) 2292 uint chan)
2101{ 2293{
2102 int ret; 2294 int ret;
2103 int i;
2104 struct sk_buff *pkt_next, *tmp; 2295 struct sk_buff *pkt_next, *tmp;
2105 2296
2106 brcmf_dbg(TRACE, "Enter\n"); 2297 brcmf_dbg(TRACE, "Enter\n");
@@ -2113,28 +2304,9 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2113 ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq); 2304 ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
2114 bus->sdcnt.f2txdata++; 2305 bus->sdcnt.f2txdata++;
2115 2306
2116 if (ret < 0) { 2307 if (ret < 0)
2117 /* On failure, abort the command and terminate the frame */ 2308 brcmf_sdio_txfail(bus);
2118 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2119 ret);
2120 bus->sdcnt.tx_sderrs++;
2121 2309
2122 brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
2123 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2124 SFC_WF_TERM, NULL);
2125 bus->sdcnt.f1regdata++;
2126
2127 for (i = 0; i < 3; i++) {
2128 u8 hi, lo;
2129 hi = brcmf_sdiod_regrb(bus->sdiodev,
2130 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2131 lo = brcmf_sdiod_regrb(bus->sdiodev,
2132 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2133 bus->sdcnt.f1regdata += 2;
2134 if ((hi == 0) && (lo == 0))
2135 break;
2136 }
2137 }
2138 sdio_release_host(bus->sdiodev->func[1]); 2310 sdio_release_host(bus->sdiodev->func[1]);
2139 2311
2140done: 2312done:
@@ -2164,13 +2336,15 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2164 /* Send frames until the limit or some other event */ 2336 /* Send frames until the limit or some other event */
2165 for (cnt = 0; (cnt < maxframes) && data_ok(bus);) { 2337 for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2166 pkt_num = 1; 2338 pkt_num = 1;
2167 __skb_queue_head_init(&pktq); 2339 if (down_interruptible(&bus->tx_seq_lock))
2340 return cnt;
2168 if (bus->txglom) 2341 if (bus->txglom)
2169 pkt_num = min_t(u8, bus->tx_max - bus->tx_seq, 2342 pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2170 brcmf_sdio_txglomsz); 2343 bus->sdiodev->txglomsz);
2171 pkt_num = min_t(u32, pkt_num, 2344 pkt_num = min_t(u32, pkt_num,
2172 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)); 2345 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
2173 spin_lock_bh(&bus->txqlock); 2346 __skb_queue_head_init(&pktq);
2347 spin_lock_bh(&bus->txq_lock);
2174 for (i = 0; i < pkt_num; i++) { 2348 for (i = 0; i < pkt_num; i++) {
2175 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, 2349 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
2176 &prec_out); 2350 &prec_out);
@@ -2178,15 +2352,19 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2178 break; 2352 break;
2179 __skb_queue_tail(&pktq, pkt); 2353 __skb_queue_tail(&pktq, pkt);
2180 } 2354 }
2181 spin_unlock_bh(&bus->txqlock); 2355 spin_unlock_bh(&bus->txq_lock);
2182 if (i == 0) 2356 if (i == 0) {
2357 up(&bus->tx_seq_lock);
2183 break; 2358 break;
2359 }
2184 2360
2185 ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL); 2361 ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2362 up(&bus->tx_seq_lock);
2363
2186 cnt += i; 2364 cnt += i;
2187 2365
2188 /* In poll mode, need to check for other events */ 2366 /* In poll mode, need to check for other events */
2189 if (!bus->intr && cnt) { 2367 if (!bus->intr) {
2190 /* Check device status, signal pending interrupt */ 2368 /* Check device status, signal pending interrupt */
2191 sdio_claim_host(bus->sdiodev->func[1]); 2369 sdio_claim_host(bus->sdiodev->func[1]);
2192 ret = r_sdreg32(bus, &intstatus, 2370 ret = r_sdreg32(bus, &intstatus,
@@ -2211,6 +2389,68 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2211 return cnt; 2389 return cnt;
2212} 2390}
2213 2391
2392static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
2393{
2394 u8 doff;
2395 u16 pad;
2396 uint retries = 0;
2397 struct brcmf_sdio_hdrinfo hd_info = {0};
2398 int ret;
2399
2400 brcmf_dbg(TRACE, "Enter\n");
2401
2402 /* Back the pointer to make room for bus header */
2403 frame -= bus->tx_hdrlen;
2404 len += bus->tx_hdrlen;
2405
2406 /* Add alignment padding (optional for ctl frames) */
2407 doff = ((unsigned long)frame % bus->head_align);
2408 if (doff) {
2409 frame -= doff;
2410 len += doff;
2411 memset(frame + bus->tx_hdrlen, 0, doff);
2412 }
2413
2414 /* Round send length to next SDIO block */
2415 pad = 0;
2416 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2417 pad = bus->blocksize - (len % bus->blocksize);
2418 if ((pad > bus->roundup) || (pad >= bus->blocksize))
2419 pad = 0;
2420 } else if (len % bus->head_align) {
2421 pad = bus->head_align - (len % bus->head_align);
2422 }
2423 len += pad;
2424
2425 hd_info.len = len - pad;
2426 hd_info.channel = SDPCM_CONTROL_CHANNEL;
2427 hd_info.dat_offset = doff + bus->tx_hdrlen;
2428 hd_info.seq_num = bus->tx_seq;
2429 hd_info.lastfrm = true;
2430 hd_info.tail_pad = pad;
2431 brcmf_sdio_hdpack(bus, frame, &hd_info);
2432
2433 if (bus->txglom)
2434 brcmf_sdio_update_hwhdr(frame, len);
2435
2436 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2437 frame, len, "Tx Frame:\n");
2438 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2439 BRCMF_HDRS_ON(),
2440 frame, min_t(u16, len, 16), "TxHdr:\n");
2441
2442 do {
2443 ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
2444
2445 if (ret < 0)
2446 brcmf_sdio_txfail(bus);
2447 else
2448 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2449 } while (ret < 0 && retries++ < TXRETRIES);
2450
2451 return ret;
2452}
2453
2214static void brcmf_sdio_bus_stop(struct device *dev) 2454static void brcmf_sdio_bus_stop(struct device *dev)
2215{ 2455{
2216 u32 local_hostintmask; 2456 u32 local_hostintmask;
@@ -2292,21 +2532,29 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
2292 } 2532 }
2293} 2533}
2294 2534
2535static void atomic_orr(int val, atomic_t *v)
2536{
2537 int old_val;
2538
2539 old_val = atomic_read(v);
2540 while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
2541 old_val = atomic_read(v);
2542}
2543
2295static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) 2544static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2296{ 2545{
2297 u8 idx; 2546 struct brcmf_core *buscore;
2298 u32 addr; 2547 u32 addr;
2299 unsigned long val; 2548 unsigned long val;
2300 int n, ret; 2549 int ret;
2301 2550
2302 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 2551 buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
2303 addr = bus->ci->c_inf[idx].base + 2552 addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
2304 offsetof(struct sdpcmd_regs, intstatus);
2305 2553
2306 val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret); 2554 val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
2307 bus->sdcnt.f1regdata++; 2555 bus->sdcnt.f1regdata++;
2308 if (ret != 0) 2556 if (ret != 0)
2309 val = 0; 2557 return ret;
2310 2558
2311 val &= bus->hostintmask; 2559 val &= bus->hostintmask;
2312 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE)); 2560 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
@@ -2315,13 +2563,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2315 if (val) { 2563 if (val) {
2316 brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret); 2564 brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
2317 bus->sdcnt.f1regdata++; 2565 bus->sdcnt.f1regdata++;
2318 } 2566 atomic_orr(val, &bus->intstatus);
2319
2320 if (ret) {
2321 atomic_set(&bus->intstatus, 0);
2322 } else if (val) {
2323 for_each_set_bit(n, &val, 32)
2324 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2325 } 2567 }
2326 2568
2327 return ret; 2569 return ret;
@@ -2331,10 +2573,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2331{ 2573{
2332 u32 newstatus = 0; 2574 u32 newstatus = 0;
2333 unsigned long intstatus; 2575 unsigned long intstatus;
2334 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2335 uint txlimit = bus->txbound; /* Tx frames to send before resched */ 2576 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2336 uint framecnt = 0; /* Temporary counter of tx/rx frames */ 2577 uint framecnt; /* Temporary counter of tx/rx frames */
2337 int err = 0, n; 2578 int err = 0;
2338 2579
2339 brcmf_dbg(TRACE, "Enter\n"); 2580 brcmf_dbg(TRACE, "Enter\n");
2340 2581
@@ -2431,70 +2672,38 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2431 intstatus &= ~I_HMB_FRAME_IND; 2672 intstatus &= ~I_HMB_FRAME_IND;
2432 2673
2433 /* On frame indication, read available frames */ 2674 /* On frame indication, read available frames */
2434 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) { 2675 if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
2435 framecnt = brcmf_sdio_readframes(bus, rxlimit); 2676 brcmf_sdio_readframes(bus, bus->rxbound);
2436 if (!bus->rxpending) 2677 if (!bus->rxpending)
2437 intstatus &= ~I_HMB_FRAME_IND; 2678 intstatus &= ~I_HMB_FRAME_IND;
2438 rxlimit -= min(framecnt, rxlimit);
2439 } 2679 }
2440 2680
2441 /* Keep still-pending events for next scheduling */ 2681 /* Keep still-pending events for next scheduling */
2442 if (intstatus) { 2682 if (intstatus)
2443 for_each_set_bit(n, &intstatus, 32) 2683 atomic_orr(intstatus, &bus->intstatus);
2444 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2445 }
2446 2684
2447 brcmf_sdio_clrintr(bus); 2685 brcmf_sdio_clrintr(bus);
2448 2686
2449 if (data_ok(bus) && bus->ctrl_frame_stat && 2687 if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
2450 (bus->clkstate == CLK_AVAIL)) { 2688 (down_interruptible(&bus->tx_seq_lock) == 0)) {
2451 int i; 2689 if (data_ok(bus)) {
2452 2690 sdio_claim_host(bus->sdiodev->func[1]);
2453 sdio_claim_host(bus->sdiodev->func[1]); 2691 err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
2454 err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf, 2692 bus->ctrl_frame_len);
2455 (u32)bus->ctrl_frame_len); 2693 sdio_release_host(bus->sdiodev->func[1]);
2456
2457 if (err < 0) {
2458 /* On failure, abort the command and
2459 terminate the frame */
2460 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2461 err);
2462 bus->sdcnt.tx_sderrs++;
2463
2464 brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
2465
2466 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2467 SFC_WF_TERM, &err);
2468 bus->sdcnt.f1regdata++;
2469
2470 for (i = 0; i < 3; i++) {
2471 u8 hi, lo;
2472 hi = brcmf_sdiod_regrb(bus->sdiodev,
2473 SBSDIO_FUNC1_WFRAMEBCHI,
2474 &err);
2475 lo = brcmf_sdiod_regrb(bus->sdiodev,
2476 SBSDIO_FUNC1_WFRAMEBCLO,
2477 &err);
2478 bus->sdcnt.f1regdata += 2;
2479 if ((hi == 0) && (lo == 0))
2480 break;
2481 }
2482 2694
2483 } else { 2695 bus->ctrl_frame_stat = false;
2484 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP; 2696 brcmf_sdio_wait_event_wakeup(bus);
2485 } 2697 }
2486 sdio_release_host(bus->sdiodev->func[1]); 2698 up(&bus->tx_seq_lock);
2487 bus->ctrl_frame_stat = false;
2488 brcmf_sdio_wait_event_wakeup(bus);
2489 } 2699 }
2490 /* Send queued frames (limit 1 if rx may still be pending) */ 2700 /* Send queued frames (limit 1 if rx may still be pending) */
2491 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) && 2701 if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2492 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit 2702 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
2493 && data_ok(bus)) { 2703 data_ok(bus)) {
2494 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) : 2704 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2495 txlimit; 2705 txlimit;
2496 framecnt = brcmf_sdio_sendfromq(bus, framecnt); 2706 brcmf_sdio_sendfromq(bus, framecnt);
2497 txlimit -= framecnt;
2498 } 2707 }
2499 2708
2500 if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) { 2709 if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
@@ -2504,19 +2713,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2504 atomic_read(&bus->ipend) > 0 || 2713 atomic_read(&bus->ipend) > 0 ||
2505 (!atomic_read(&bus->fcstate) && 2714 (!atomic_read(&bus->fcstate) &&
2506 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && 2715 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2507 data_ok(bus)) || PKT_AVAILABLE()) { 2716 data_ok(bus))) {
2508 atomic_inc(&bus->dpc_tskcnt); 2717 atomic_inc(&bus->dpc_tskcnt);
2509 } 2718 }
2510
2511 /* If we're done for now, turn off clock request. */
2512 if ((bus->clkstate != CLK_PENDING)
2513 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2514 bus->activity = false;
2515 brcmf_dbg(SDIO, "idle state\n");
2516 sdio_claim_host(bus->sdiodev->func[1]);
2517 brcmf_sdio_bus_sleep(bus, true, false);
2518 sdio_release_host(bus->sdiodev->func[1]);
2519 }
2520} 2719}
2521 2720
2522static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev) 2721static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
@@ -2531,15 +2730,12 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
2531static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt) 2730static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2532{ 2731{
2533 int ret = -EBADE; 2732 int ret = -EBADE;
2534 uint datalen, prec; 2733 uint prec;
2535 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2734 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2536 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2735 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2537 struct brcmf_sdio *bus = sdiodev->bus; 2736 struct brcmf_sdio *bus = sdiodev->bus;
2538 ulong flags;
2539 2737
2540 brcmf_dbg(TRACE, "Enter\n"); 2738 brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
2541
2542 datalen = pkt->len;
2543 2739
2544 /* Add space for the header */ 2740 /* Add space for the header */
2545 skb_push(pkt, bus->tx_hdrlen); 2741 skb_push(pkt, bus->tx_hdrlen);
@@ -2553,7 +2749,9 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2553 bus->sdcnt.fcqueued++; 2749 bus->sdcnt.fcqueued++;
2554 2750
2555 /* Priority based enq */ 2751 /* Priority based enq */
2556 spin_lock_irqsave(&bus->txqlock, flags); 2752 spin_lock_bh(&bus->txq_lock);
2753 /* reset bus_flags in packet cb */
2754 *(u16 *)(pkt->cb) = 0;
2557 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { 2755 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2558 skb_pull(pkt, bus->tx_hdrlen); 2756 skb_pull(pkt, bus->tx_hdrlen);
2559 brcmf_err("out of bus->txq !!!\n"); 2757 brcmf_err("out of bus->txq !!!\n");
@@ -2566,7 +2764,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2566 bus->txoff = true; 2764 bus->txoff = true;
2567 brcmf_txflowblock(bus->sdiodev->dev, true); 2765 brcmf_txflowblock(bus->sdiodev->dev, true);
2568 } 2766 }
2569 spin_unlock_irqrestore(&bus->txqlock, flags); 2767 spin_unlock_bh(&bus->txq_lock);
2570 2768
2571#ifdef DEBUG 2769#ifdef DEBUG
2572 if (pktq_plen(&bus->txq, prec) > qcount[prec]) 2770 if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2661,110 +2859,27 @@ break2:
2661} 2859}
2662#endif /* DEBUG */ 2860#endif /* DEBUG */
2663 2861
2664static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2665{
2666 int i;
2667 int ret;
2668
2669 bus->ctrl_frame_stat = false;
2670 ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
2671
2672 if (ret < 0) {
2673 /* On failure, abort the command and terminate the frame */
2674 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2675 ret);
2676 bus->sdcnt.tx_sderrs++;
2677
2678 brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
2679
2680 brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2681 SFC_WF_TERM, NULL);
2682 bus->sdcnt.f1regdata++;
2683
2684 for (i = 0; i < 3; i++) {
2685 u8 hi, lo;
2686 hi = brcmf_sdiod_regrb(bus->sdiodev,
2687 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2688 lo = brcmf_sdiod_regrb(bus->sdiodev,
2689 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2690 bus->sdcnt.f1regdata += 2;
2691 if (hi == 0 && lo == 0)
2692 break;
2693 }
2694 return ret;
2695 }
2696
2697 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2698
2699 return ret;
2700}
2701
2702static int 2862static int
2703brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) 2863brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2704{ 2864{
2705 u8 *frame;
2706 u16 len, pad;
2707 uint retries = 0;
2708 u8 doff = 0;
2709 int ret = -1;
2710 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2865 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2711 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2866 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2712 struct brcmf_sdio *bus = sdiodev->bus; 2867 struct brcmf_sdio *bus = sdiodev->bus;
2713 struct brcmf_sdio_hdrinfo hd_info = {0}; 2868 int ret = -1;
2714 2869
2715 brcmf_dbg(TRACE, "Enter\n"); 2870 brcmf_dbg(TRACE, "Enter\n");
2716 2871
2717 /* Back the pointer to make a room for bus header */ 2872 if (down_interruptible(&bus->tx_seq_lock))
2718 frame = msg - bus->tx_hdrlen; 2873 return -EINTR;
2719 len = (msglen += bus->tx_hdrlen);
2720
2721 /* Add alignment padding (optional for ctl frames) */
2722 doff = ((unsigned long)frame % bus->head_align);
2723 if (doff) {
2724 frame -= doff;
2725 len += doff;
2726 msglen += doff;
2727 memset(frame, 0, doff + bus->tx_hdrlen);
2728 }
2729 /* precondition: doff < bus->head_align */
2730 doff += bus->tx_hdrlen;
2731
2732 /* Round send length to next SDIO block */
2733 pad = 0;
2734 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2735 pad = bus->blocksize - (len % bus->blocksize);
2736 if ((pad > bus->roundup) || (pad >= bus->blocksize))
2737 pad = 0;
2738 } else if (len % bus->head_align) {
2739 pad = bus->head_align - (len % bus->head_align);
2740 }
2741 len += pad;
2742
2743 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2744
2745 /* Make sure backplane clock is on */
2746 sdio_claim_host(bus->sdiodev->func[1]);
2747 brcmf_sdio_bus_sleep(bus, false, false);
2748 sdio_release_host(bus->sdiodev->func[1]);
2749
2750 hd_info.len = (u16)msglen;
2751 hd_info.channel = SDPCM_CONTROL_CHANNEL;
2752 hd_info.dat_offset = doff;
2753 hd_info.seq_num = bus->tx_seq;
2754 hd_info.lastfrm = true;
2755 hd_info.tail_pad = pad;
2756 brcmf_sdio_hdpack(bus, frame, &hd_info);
2757
2758 if (bus->txglom)
2759 brcmf_sdio_update_hwhdr(frame, len);
2760 2874
2761 if (!data_ok(bus)) { 2875 if (!data_ok(bus)) {
2762 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n", 2876 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2763 bus->tx_max, bus->tx_seq); 2877 bus->tx_max, bus->tx_seq);
2764 bus->ctrl_frame_stat = true; 2878 up(&bus->tx_seq_lock);
2765 /* Send from dpc */ 2879 /* Send from dpc */
2766 bus->ctrl_frame_buf = frame; 2880 bus->ctrl_frame_buf = msg;
2767 bus->ctrl_frame_len = len; 2881 bus->ctrl_frame_len = msglen;
2882 bus->ctrl_frame_stat = true;
2768 2883
2769 wait_event_interruptible_timeout(bus->ctrl_wait, 2884 wait_event_interruptible_timeout(bus->ctrl_wait,
2770 !bus->ctrl_frame_stat, 2885 !bus->ctrl_frame_stat,
@@ -2775,31 +2890,18 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2775 ret = 0; 2890 ret = 0;
2776 } else { 2891 } else {
2777 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n"); 2892 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2893 bus->ctrl_frame_stat = false;
2894 if (down_interruptible(&bus->tx_seq_lock))
2895 return -EINTR;
2778 ret = -1; 2896 ret = -1;
2779 } 2897 }
2780 } 2898 }
2781
2782 if (ret == -1) { 2899 if (ret == -1) {
2783 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2784 frame, len, "Tx Frame:\n");
2785 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2786 BRCMF_HDRS_ON(),
2787 frame, min_t(u16, len, 16), "TxHdr:\n");
2788
2789 do {
2790 sdio_claim_host(bus->sdiodev->func[1]);
2791 ret = brcmf_sdio_tx_frame(bus, frame, len);
2792 sdio_release_host(bus->sdiodev->func[1]);
2793 } while (ret < 0 && retries++ < TXRETRIES);
2794 }
2795
2796 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2797 atomic_read(&bus->dpc_tskcnt) == 0) {
2798 bus->activity = false;
2799 sdio_claim_host(bus->sdiodev->func[1]); 2900 sdio_claim_host(bus->sdiodev->func[1]);
2800 brcmf_dbg(INFO, "idle\n"); 2901 brcmf_sdio_bus_sleep(bus, false, false);
2801 brcmf_sdio_clkctl(bus, CLK_NONE, true); 2902 ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
2802 sdio_release_host(bus->sdiodev->func[1]); 2903 sdio_release_host(bus->sdiodev->func[1]);
2904 up(&bus->tx_seq_lock);
2803 } 2905 }
2804 2906
2805 if (ret) 2907 if (ret)
@@ -2811,72 +2913,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2811} 2913}
2812 2914
2813#ifdef DEBUG 2915#ifdef DEBUG
2814static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2815{
2816 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2817}
2818
2819static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2820 struct sdpcm_shared *sh)
2821{
2822 u32 addr;
2823 int rv;
2824 u32 shaddr = 0;
2825 struct sdpcm_shared_le sh_le;
2826 __le32 addr_le;
2827
2828 shaddr = bus->ci->rambase + bus->ramsize - 4;
2829
2830 /*
2831 * Read last word in socram to determine
2832 * address of sdpcm_shared structure
2833 */
2834 sdio_claim_host(bus->sdiodev->func[1]);
2835 brcmf_sdio_bus_sleep(bus, false, false);
2836 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
2837 sdio_release_host(bus->sdiodev->func[1]);
2838 if (rv < 0)
2839 return rv;
2840
2841 addr = le32_to_cpu(addr_le);
2842
2843 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
2844
2845 /*
2846 * Check if addr is valid.
2847 * NVRAM length at the end of memory should have been overwritten.
2848 */
2849 if (!brcmf_sdio_valid_shared_address(addr)) {
2850 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2851 addr);
2852 return -EINVAL;
2853 }
2854
2855 /* Read hndrte_shared structure */
2856 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
2857 sizeof(struct sdpcm_shared_le));
2858 if (rv < 0)
2859 return rv;
2860
2861 /* Endianness */
2862 sh->flags = le32_to_cpu(sh_le.flags);
2863 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
2864 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
2865 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
2866 sh->assert_line = le32_to_cpu(sh_le.assert_line);
2867 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2868 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2869
2870 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
2871 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
2872 SDPCM_SHARED_VERSION,
2873 sh->flags & SDPCM_SHARED_VERSION_MASK);
2874 return -EPROTO;
2875 }
2876
2877 return 0;
2878}
2879
2880static int brcmf_sdio_dump_console(struct brcmf_sdio *bus, 2916static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2881 struct sdpcm_shared *sh, char __user *data, 2917 struct sdpcm_shared *sh, char __user *data,
2882 size_t count) 2918 size_t count)
@@ -3106,6 +3142,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3106 debugfs_create_file("forensics", S_IRUGO, dentry, bus, 3142 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
3107 &brcmf_sdio_forensic_ops); 3143 &brcmf_sdio_forensic_ops);
3108 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt); 3144 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
3145 debugfs_create_u32("console_interval", 0644, dentry,
3146 &bus->console_interval);
3109} 3147}
3110#else 3148#else
3111static int brcmf_sdio_checkdied(struct brcmf_sdio *bus) 3149static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3224,32 +3262,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
3224 const struct firmware *fw) 3262 const struct firmware *fw)
3225{ 3263{
3226 int err; 3264 int err;
3227 int offset;
3228 int address;
3229 int len;
3230 3265
3231 brcmf_dbg(TRACE, "Enter\n"); 3266 brcmf_dbg(TRACE, "Enter\n");
3232 3267
3233 err = 0; 3268 err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
3234 offset = 0; 3269 (u8 *)fw->data, fw->size);
3235 address = bus->ci->rambase; 3270 if (err)
3236 while (offset < fw->size) { 3271 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3237 len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK : 3272 err, (int)fw->size, bus->ci->rambase);
3238 fw->size - offset; 3273 else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
3239 err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, 3274 (u8 *)fw->data, fw->size))
3240 (u8 *)&fw->data[offset], len); 3275 err = -EIO;
3241 if (err) {
3242 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3243 err, len, address);
3244 return err;
3245 }
3246 offset += len;
3247 address += len;
3248 }
3249 if (!err)
3250 if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
3251 (u8 *)fw->data, fw->size))
3252 err = -EIO;
3253 3276
3254 return err; 3277 return err;
3255} 3278}
@@ -3292,7 +3315,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
3292 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 3315 brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3293 3316
3294 /* Keep arm in reset */ 3317 /* Keep arm in reset */
3295 brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci); 3318 brcmf_chip_enter_download(bus->ci);
3296 3319
3297 fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN); 3320 fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
3298 if (fw == NULL) { 3321 if (fw == NULL) {
@@ -3324,7 +3347,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
3324 } 3347 }
3325 3348
3326 /* Take arm out of reset */ 3349 /* Take arm out of reset */
3327 if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) { 3350 if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
3328 brcmf_err("error getting out of ARM core reset\n"); 3351 brcmf_err("error getting out of ARM core reset\n");
3329 goto err; 3352 goto err;
3330 } 3353 }
@@ -3339,40 +3362,6 @@ err:
3339 return bcmerror; 3362 return bcmerror;
3340} 3363}
3341 3364
3342static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
3343{
3344 u32 addr, reg, pmu_cc3_mask = ~0;
3345 int err;
3346
3347 brcmf_dbg(TRACE, "Enter\n");
3348
3349 /* old chips with PMU version less than 17 don't support save restore */
3350 if (bus->ci->pmurev < 17)
3351 return false;
3352
3353 switch (bus->ci->chip) {
3354 case BCM43241_CHIP_ID:
3355 case BCM4335_CHIP_ID:
3356 case BCM4339_CHIP_ID:
3357 /* read PMU chipcontrol register 3 */
3358 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
3359 brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
3360 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
3361 reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
3362 return (reg & pmu_cc3_mask) != 0;
3363 default:
3364 addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
3365 reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
3366 if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
3367 return false;
3368
3369 addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
3370 reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
3371 return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
3372 PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
3373 }
3374}
3375
3376static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) 3365static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
3377{ 3366{
3378 int err = 0; 3367 int err = 0;
@@ -3424,7 +3413,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
3424 brcmf_dbg(TRACE, "Enter\n"); 3413 brcmf_dbg(TRACE, "Enter\n");
3425 3414
3426 /* KSO bit added in SDIO core rev 12 */ 3415 /* KSO bit added in SDIO core rev 12 */
3427 if (bus->ci->c_inf[1].rev < 12) 3416 if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
3428 return 0; 3417 return 0;
3429 3418
3430 val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err); 3419 val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@@ -3455,15 +3444,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
3455 struct brcmf_sdio *bus = sdiodev->bus; 3444 struct brcmf_sdio *bus = sdiodev->bus;
3456 uint pad_size; 3445 uint pad_size;
3457 u32 value; 3446 u32 value;
3458 u8 idx;
3459 int err; 3447 int err;
3460 3448
3461 /* the commands below use the terms tx and rx from 3449 /* the commands below use the terms tx and rx from
3462 * a device perspective, ie. bus:txglom affects the 3450 * a device perspective, ie. bus:txglom affects the
3463 * bus transfers from device to host. 3451 * bus transfers from device to host.
3464 */ 3452 */
3465 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 3453 if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
3466 if (bus->ci->c_inf[idx].rev < 12) {
3467 /* for sdio core rev < 12, disable txgloming */ 3454 /* for sdio core rev < 12, disable txgloming */
3468 value = 0; 3455 value = 0;
3469 err = brcmf_iovar_data_set(dev, "bus:txglom", &value, 3456 err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3570,7 +3557,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
3570 ret = -ENODEV; 3557 ret = -ENODEV;
3571 } 3558 }
3572 3559
3573 if (brcmf_sdio_sr_capable(bus)) { 3560 if (brcmf_chip_sr_capable(bus->ci)) {
3574 brcmf_sdio_sr_init(bus); 3561 brcmf_sdio_sr_init(bus);
3575 } else { 3562 } else {
3576 /* Restore previous clock setting */ 3563 /* Restore previous clock setting */
@@ -3714,11 +3701,175 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
3714 datawork); 3701 datawork);
3715 3702
3716 while (atomic_read(&bus->dpc_tskcnt)) { 3703 while (atomic_read(&bus->dpc_tskcnt)) {
3704 atomic_set(&bus->dpc_tskcnt, 0);
3717 brcmf_sdio_dpc(bus); 3705 brcmf_sdio_dpc(bus);
3718 atomic_dec(&bus->dpc_tskcnt);
3719 } 3706 }
3720} 3707}
3721 3708
3709static void
3710brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
3711 struct brcmf_chip *ci, u32 drivestrength)
3712{
3713 const struct sdiod_drive_str *str_tab = NULL;
3714 u32 str_mask;
3715 u32 str_shift;
3716 u32 base;
3717 u32 i;
3718 u32 drivestrength_sel = 0;
3719 u32 cc_data_temp;
3720 u32 addr;
3721
3722 if (!(ci->cc_caps & CC_CAP_PMU))
3723 return;
3724
3725 switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
3726 case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
3727 str_tab = sdiod_drvstr_tab1_1v8;
3728 str_mask = 0x00003800;
3729 str_shift = 11;
3730 break;
3731 case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
3732 str_tab = sdiod_drvstr_tab6_1v8;
3733 str_mask = 0x00001800;
3734 str_shift = 11;
3735 break;
3736 case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
3737 /* note: 43143 does not support tristate */
3738 i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
3739 if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
3740 str_tab = sdiod_drvstr_tab2_3v3;
3741 str_mask = 0x00000007;
3742 str_shift = 0;
3743 } else
3744 brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
3745 ci->name, drivestrength);
3746 break;
3747 case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
3748 str_tab = sdiod_drive_strength_tab5_1v8;
3749 str_mask = 0x00003800;
3750 str_shift = 11;
3751 break;
3752 default:
3753 brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
3754 ci->name, ci->chiprev, ci->pmurev);
3755 break;
3756 }
3757
3758 if (str_tab != NULL) {
3759 for (i = 0; str_tab[i].strength != 0; i++) {
3760 if (drivestrength >= str_tab[i].strength) {
3761 drivestrength_sel = str_tab[i].sel;
3762 break;
3763 }
3764 }
3765 base = brcmf_chip_get_chipcommon(ci)->base;
3766 addr = CORE_CC_REG(base, chipcontrol_addr);
3767 brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
3768 cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
3769 cc_data_temp &= ~str_mask;
3770 drivestrength_sel <<= str_shift;
3771 cc_data_temp |= drivestrength_sel;
3772 brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
3773
3774 brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
3775 str_tab[i].strength, drivestrength, cc_data_temp);
3776 }
3777}
3778
3779static int brcmf_sdio_buscoreprep(void *ctx)
3780{
3781 struct brcmf_sdio_dev *sdiodev = ctx;
3782 int err = 0;
3783 u8 clkval, clkset;
3784
3785 /* Try forcing SDIO core to do ALPAvail request only */
3786 clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
3787 brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3788 if (err) {
3789 brcmf_err("error writing for HT off\n");
3790 return err;
3791 }
3792
3793 /* If register supported, wait for ALPAvail and then force ALP */
3794 /* This may take up to 15 milliseconds */
3795 clkval = brcmf_sdiod_regrb(sdiodev,
3796 SBSDIO_FUNC1_CHIPCLKCSR, NULL);
3797
3798 if ((clkval & ~SBSDIO_AVBITS) != clkset) {
3799 brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
3800 clkset, clkval);
3801 return -EACCES;
3802 }
3803
3804 SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
3805 SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
3806 !SBSDIO_ALPAV(clkval)),
3807 PMU_MAX_TRANSITION_DLY);
3808 if (!SBSDIO_ALPAV(clkval)) {
3809 brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
3810 clkval);
3811 return -EBUSY;
3812 }
3813
3814 clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
3815 brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3816 udelay(65);
3817
3818 /* Also, disable the extra SDIO pull-ups */
3819 brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
3820
3821 return 0;
3822}
3823
3824static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
3825 u32 rstvec)
3826{
3827 struct brcmf_sdio_dev *sdiodev = ctx;
3828 struct brcmf_core *core;
3829 u32 reg_addr;
3830
3831 /* clear all interrupts */
3832 core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
3833 reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
3834 brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
3835
3836 if (rstvec)
3837 /* Write reset vector to address 0 */
3838 brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
3839 sizeof(rstvec));
3840}
3841
3842static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
3843{
3844 struct brcmf_sdio_dev *sdiodev = ctx;
3845 u32 val, rev;
3846
3847 val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
3848 if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
3849 addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
3850 rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
3851 if (rev >= 2) {
3852 val &= ~CID_ID_MASK;
3853 val |= BCM4339_CHIP_ID;
3854 }
3855 }
3856 return val;
3857}
3858
3859static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
3860{
3861 struct brcmf_sdio_dev *sdiodev = ctx;
3862
3863 brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
3864}
3865
3866static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
3867 .prepare = brcmf_sdio_buscoreprep,
3868 .exit_dl = brcmf_sdio_buscore_exitdl,
3869 .read32 = brcmf_sdio_buscore_read32,
3870 .write32 = brcmf_sdio_buscore_write32,
3871};
3872
3722static bool 3873static bool
3723brcmf_sdio_probe_attach(struct brcmf_sdio *bus) 3874brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3724{ 3875{
@@ -3734,7 +3885,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3734 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL)); 3885 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3735 3886
3736 /* 3887 /*
3737 * Force PLL off until brcmf_sdio_chip_attach() 3888 * Force PLL off until brcmf_chip_attach()
3738 * programs PLL control regs 3889 * programs PLL control regs
3739 */ 3890 */
3740 3891
@@ -3755,8 +3906,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3755 */ 3906 */
3756 brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN); 3907 brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
3757 3908
3758 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) { 3909 bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
3759 brcmf_err("brcmf_sdio_chip_attach failed!\n"); 3910 if (IS_ERR(bus->ci)) {
3911 brcmf_err("brcmf_chip_attach failed!\n");
3912 bus->ci = NULL;
3760 goto fail; 3913 goto fail;
3761 } 3914 }
3762 3915
@@ -3769,7 +3922,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3769 drivestrength = bus->sdiodev->pdata->drive_strength; 3922 drivestrength = bus->sdiodev->pdata->drive_strength;
3770 else 3923 else
3771 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; 3924 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3772 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength); 3925 brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3773 3926
3774 /* Get info on the SOCRAM cores... */ 3927 /* Get info on the SOCRAM cores... */
3775 bus->ramsize = bus->ci->ramsize; 3928 bus->ramsize = bus->ci->ramsize;
@@ -3792,24 +3945,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3792 goto fail; 3945 goto fail;
3793 3946
3794 /* set PMUControl so a backplane reset does PMU state reload */ 3947 /* set PMUControl so a backplane reset does PMU state reload */
3795 reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base, 3948 reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
3796 pmucontrol); 3949 pmucontrol);
3797 reg_val = brcmf_sdiod_regrl(bus->sdiodev, 3950 reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
3798 reg_addr,
3799 &err);
3800 if (err) 3951 if (err)
3801 goto fail; 3952 goto fail;
3802 3953
3803 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT); 3954 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3804 3955
3805 brcmf_sdiod_regwl(bus->sdiodev, 3956 brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
3806 reg_addr,
3807 reg_val,
3808 &err);
3809 if (err) 3957 if (err)
3810 goto fail; 3958 goto fail;
3811 3959
3812
3813 sdio_release_host(bus->sdiodev->func[1]); 3960 sdio_release_host(bus->sdiodev->func[1]);
3814 3961
3815 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN); 3962 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3849,6 +3996,7 @@ brcmf_sdio_watchdog_thread(void *data)
3849 brcmf_sdio_bus_watchdog(bus); 3996 brcmf_sdio_bus_watchdog(bus);
3850 /* Count the tick for reference */ 3997 /* Count the tick for reference */
3851 bus->sdcnt.tickcnt++; 3998 bus->sdcnt.tickcnt++;
3999 reinit_completion(&bus->watchdog_wait);
3852 } else 4000 } else
3853 break; 4001 break;
3854 } 4002 }
@@ -3925,7 +4073,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
3925 } 4073 }
3926 4074
3927 spin_lock_init(&bus->rxctl_lock); 4075 spin_lock_init(&bus->rxctl_lock);
3928 spin_lock_init(&bus->txqlock); 4076 spin_lock_init(&bus->txq_lock);
4077 sema_init(&bus->tx_seq_lock, 1);
3929 init_waitqueue_head(&bus->ctrl_wait); 4078 init_waitqueue_head(&bus->ctrl_wait);
3930 init_waitqueue_head(&bus->dcmd_resp_wait); 4079 init_waitqueue_head(&bus->dcmd_resp_wait);
3931 4080
@@ -4024,14 +4173,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4024 /* De-register interrupt handler */ 4173 /* De-register interrupt handler */
4025 brcmf_sdiod_intr_unregister(bus->sdiodev); 4174 brcmf_sdiod_intr_unregister(bus->sdiodev);
4026 4175
4027 cancel_work_sync(&bus->datawork);
4028 if (bus->brcmf_wq)
4029 destroy_workqueue(bus->brcmf_wq);
4030
4031 if (bus->sdiodev->bus_if->drvr) { 4176 if (bus->sdiodev->bus_if->drvr) {
4032 brcmf_detach(bus->sdiodev->dev); 4177 brcmf_detach(bus->sdiodev->dev);
4033 } 4178 }
4034 4179
4180 cancel_work_sync(&bus->datawork);
4181 if (bus->brcmf_wq)
4182 destroy_workqueue(bus->brcmf_wq);
4183
4035 if (bus->ci) { 4184 if (bus->ci) {
4036 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) { 4185 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
4037 sdio_claim_host(bus->sdiodev->func[1]); 4186 sdio_claim_host(bus->sdiodev->func[1]);
@@ -4042,12 +4191,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4042 * all necessary cores. 4191 * all necessary cores.
4043 */ 4192 */
4044 msleep(20); 4193 msleep(20);
4045 brcmf_sdio_chip_enter_download(bus->sdiodev, 4194 brcmf_chip_enter_download(bus->ci);
4046 bus->ci);
4047 brcmf_sdio_clkctl(bus, CLK_NONE, false); 4195 brcmf_sdio_clkctl(bus, CLK_NONE, false);
4048 sdio_release_host(bus->sdiodev->func[1]); 4196 sdio_release_host(bus->sdiodev->func[1]);
4049 } 4197 }
4050 brcmf_sdio_chip_detach(&bus->ci); 4198 brcmf_chip_detach(bus->ci);
4051 } 4199 }
4052 4200
4053 kfree(bus->rxbuf); 4201 kfree(bus->rxbuf);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 22adbe311d20..59a5af5bf994 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
124} 124}
125 125
126static u32 126static u32
127brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen) 127brcmf_create_iovar(char *name, const char *data, u32 datalen,
128 char *buf, u32 buflen)
128{ 129{
129 u32 len; 130 u32 len;
130 131
@@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
144 145
145 146
146s32 147s32
147brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data, 148brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
148 u32 len) 149 u32 len)
149{ 150{
150 struct brcmf_pub *drvr = ifp->drvr; 151 struct brcmf_pub *drvr = ifp->drvr;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 77eae86e55c2..a30be683f4a1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
83s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data); 83s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
84s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data); 84s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
85 85
86s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data, 86s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
87 u32 len); 87 u32 len);
88s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data, 88s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
89 u32 len); 89 u32 len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index af17a5bc8b83..614e4888504f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -48,6 +48,11 @@
48 48
49#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */ 49#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
50 50
51/* OBSS Coex Auto/On/Off */
52#define BRCMF_OBSS_COEX_AUTO (-1)
53#define BRCMF_OBSS_COEX_OFF 0
54#define BRCMF_OBSS_COEX_ON 1
55
51enum brcmf_fil_p2p_if_types { 56enum brcmf_fil_p2p_if_types {
52 BRCMF_FIL_P2P_IF_CLIENT, 57 BRCMF_FIL_P2P_IF_CLIENT,
53 BRCMF_FIL_P2P_IF_GO, 58 BRCMF_FIL_P2P_IF_GO,
@@ -87,6 +92,11 @@ struct brcmf_fil_bss_enable_le {
87 __le32 enable; 92 __le32 enable;
88}; 93};
89 94
95struct brcmf_fil_bwcap_le {
96 __le32 band;
97 __le32 bw_cap;
98};
99
90/** 100/**
91 * struct tdls_iovar - common structure for tdls iovars. 101 * struct tdls_iovar - common structure for tdls iovars.
92 * 102 *
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index fc4f98b275d7..f3445ac627e4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -797,7 +797,8 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
797 /* SOCIAL CHANNELS 1, 6, 11 */ 797 /* SOCIAL CHANNELS 1, 6, 11 */
798 search_state = WL_P2P_DISC_ST_SEARCH; 798 search_state = WL_P2P_DISC_ST_SEARCH;
799 brcmf_dbg(INFO, "P2P SEARCH PHASE START\n"); 799 brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
800 } else if (dev != NULL && vif->mode == WL_MODE_AP) { 800 } else if (dev != NULL &&
801 vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
801 /* If you are already a GO, then do SEARCH only */ 802 /* If you are already a GO, then do SEARCH only */
802 brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n"); 803 brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
803 search_state = WL_P2P_DISC_ST_SEARCH; 804 search_state = WL_P2P_DISC_ST_SEARCH;
@@ -2256,7 +2257,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2256 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 2257 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
2257 struct brcmf_cfg80211_vif *vif; 2258 struct brcmf_cfg80211_vif *vif;
2258 enum brcmf_fil_p2p_if_types iftype; 2259 enum brcmf_fil_p2p_if_types iftype;
2259 enum wl_mode mode;
2260 int err; 2260 int err;
2261 2261
2262 if (brcmf_cfg80211_vif_event_armed(cfg)) 2262 if (brcmf_cfg80211_vif_event_armed(cfg))
@@ -2267,11 +2267,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2267 switch (type) { 2267 switch (type) {
2268 case NL80211_IFTYPE_P2P_CLIENT: 2268 case NL80211_IFTYPE_P2P_CLIENT:
2269 iftype = BRCMF_FIL_P2P_IF_CLIENT; 2269 iftype = BRCMF_FIL_P2P_IF_CLIENT;
2270 mode = WL_MODE_BSS;
2271 break; 2270 break;
2272 case NL80211_IFTYPE_P2P_GO: 2271 case NL80211_IFTYPE_P2P_GO:
2273 iftype = BRCMF_FIL_P2P_IF_GO; 2272 iftype = BRCMF_FIL_P2P_IF_GO;
2274 mode = WL_MODE_AP;
2275 break; 2273 break;
2276 case NL80211_IFTYPE_P2P_DEVICE: 2274 case NL80211_IFTYPE_P2P_DEVICE:
2277 return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy, 2275 return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
deleted file mode 100644
index 82bf3c5d3cdc..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ /dev/null
@@ -1,972 +0,0 @@
1/*
2 * Copyright (c) 2011 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16/* ***** SDIO interface chip backplane handle functions ***** */
17
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/sdio_func.h>
22#include <linux/mmc/sdio_ids.h>
23#include <linux/ssb/ssb_regs.h>
24#include <linux/bcma/bcma.h>
25
26#include <chipcommon.h>
27#include <brcm_hw_ids.h>
28#include <brcmu_wifi.h>
29#include <brcmu_utils.h>
30#include <soc.h>
31#include "dhd_dbg.h"
32#include "sdio_host.h"
33#include "sdio_chip.h"
34
35/* chip core base & ramsize */
36/* bcm4329 */
37/* SDIO device core, ID 0x829 */
38#define BCM4329_CORE_BUS_BASE 0x18011000
39/* internal memory core, ID 0x80e */
40#define BCM4329_CORE_SOCRAM_BASE 0x18003000
41/* ARM Cortex M3 core, ID 0x82a */
42#define BCM4329_CORE_ARM_BASE 0x18002000
43#define BCM4329_RAMSIZE 0x48000
44
45/* bcm43143 */
46/* SDIO device core */
47#define BCM43143_CORE_BUS_BASE 0x18002000
48/* internal memory core */
49#define BCM43143_CORE_SOCRAM_BASE 0x18004000
50/* ARM Cortex M3 core, ID 0x82a */
51#define BCM43143_CORE_ARM_BASE 0x18003000
52#define BCM43143_RAMSIZE 0x70000
53
54/* All D11 cores, ID 0x812 */
55#define BCM43xx_CORE_D11_BASE 0x18001000
56
57#define SBCOREREV(sbidh) \
58 ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
59 ((sbidh) & SSB_IDHIGH_RCLO))
60
61/* SOC Interconnect types (aka chip types) */
62#define SOCI_SB 0
63#define SOCI_AI 1
64
65/* EROM CompIdentB */
66#define CIB_REV_MASK 0xff000000
67#define CIB_REV_SHIFT 24
68
69/* ARM CR4 core specific control flag bits */
70#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
71
72/* D11 core specific control flag bits */
73#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
74#define D11_BCMA_IOCTL_PHYRESET 0x0008
75
76#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
77/* SDIO Pad drive strength to select value mappings */
78struct sdiod_drive_str {
79 u8 strength; /* Pad Drive Strength in mA */
80 u8 sel; /* Chip-specific select value */
81};
82/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
83static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
84 {32, 0x6},
85 {26, 0x7},
86 {22, 0x4},
87 {16, 0x5},
88 {12, 0x2},
89 {8, 0x3},
90 {4, 0x0},
91 {0, 0x1}
92};
93
94/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
95static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
96 {6, 0x7},
97 {5, 0x6},
98 {4, 0x5},
99 {3, 0x4},
100 {2, 0x2},
101 {1, 0x1},
102 {0, 0x0}
103};
104
105/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
106static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
107 {3, 0x3},
108 {2, 0x2},
109 {1, 0x1},
110 {0, 0x0} };
111
112/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
113static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
114 {16, 0x7},
115 {12, 0x5},
116 {8, 0x3},
117 {4, 0x1}
118};
119
120u8
121brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
122{
123 u8 idx;
124
125 for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
126 if (coreid == ci->c_inf[idx].id)
127 return idx;
128
129 return BRCMF_MAX_CORENUM;
130}
131
132static u32
133brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
134 struct brcmf_chip *ci, u16 coreid)
135{
136 u32 regdata;
137 u8 idx;
138
139 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
140
141 regdata = brcmf_sdiod_regrl(sdiodev,
142 CORE_SB(ci->c_inf[idx].base, sbidhigh),
143 NULL);
144 return SBCOREREV(regdata);
145}
146
147static u32
148brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
149 struct brcmf_chip *ci, u16 coreid)
150{
151 u8 idx;
152
153 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
154
155 return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
156}
157
158static bool
159brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
160 struct brcmf_chip *ci, u16 coreid)
161{
162 u32 regdata;
163 u8 idx;
164
165 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
166 if (idx == BRCMF_MAX_CORENUM)
167 return false;
168
169 regdata = brcmf_sdiod_regrl(sdiodev,
170 CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
171 NULL);
172 regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
173 SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
174 return (SSB_TMSLOW_CLOCK == regdata);
175}
176
177static bool
178brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
179 struct brcmf_chip *ci, u16 coreid)
180{
181 u32 regdata;
182 u8 idx;
183 bool ret;
184
185 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
186 if (idx == BRCMF_MAX_CORENUM)
187 return false;
188
189 regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
190 NULL);
191 ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
192
193 regdata = brcmf_sdiod_regrl(sdiodev,
194 ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
195 NULL);
196 ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
197
198 return ret;
199}
200
201static void
202brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
203 struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
204 u32 in_resetbits)
205{
206 u32 regdata, base;
207 u8 idx;
208
209 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
210 base = ci->c_inf[idx].base;
211
212 regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
213 if (regdata & SSB_TMSLOW_RESET)
214 return;
215
216 regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
217 if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
218 /*
219 * set target reject and spin until busy is clear
220 * (preserve core-specific bits)
221 */
222 regdata = brcmf_sdiod_regrl(sdiodev,
223 CORE_SB(base, sbtmstatelow), NULL);
224 brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
225 regdata | SSB_TMSLOW_REJECT, NULL);
226
227 regdata = brcmf_sdiod_regrl(sdiodev,
228 CORE_SB(base, sbtmstatelow), NULL);
229 udelay(1);
230 SPINWAIT((brcmf_sdiod_regrl(sdiodev,
231 CORE_SB(base, sbtmstatehigh),
232 NULL) &
233 SSB_TMSHIGH_BUSY), 100000);
234
235 regdata = brcmf_sdiod_regrl(sdiodev,
236 CORE_SB(base, sbtmstatehigh),
237 NULL);
238 if (regdata & SSB_TMSHIGH_BUSY)
239 brcmf_err("core state still busy\n");
240
241 regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
242 NULL);
243 if (regdata & SSB_IDLOW_INITIATOR) {
244 regdata = brcmf_sdiod_regrl(sdiodev,
245 CORE_SB(base, sbimstate),
246 NULL);
247 regdata |= SSB_IMSTATE_REJECT;
248 brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
249 regdata, NULL);
250 regdata = brcmf_sdiod_regrl(sdiodev,
251 CORE_SB(base, sbimstate),
252 NULL);
253 udelay(1);
254 SPINWAIT((brcmf_sdiod_regrl(sdiodev,
255 CORE_SB(base, sbimstate),
256 NULL) &
257 SSB_IMSTATE_BUSY), 100000);
258 }
259
260 /* set reset and reject while enabling the clocks */
261 regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
262 SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
263 brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
264 regdata, NULL);
265 regdata = brcmf_sdiod_regrl(sdiodev,
266 CORE_SB(base, sbtmstatelow), NULL);
267 udelay(10);
268
269 /* clear the initiator reject bit */
270 regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
271 NULL);
272 if (regdata & SSB_IDLOW_INITIATOR) {
273 regdata = brcmf_sdiod_regrl(sdiodev,
274 CORE_SB(base, sbimstate),
275 NULL);
276 regdata &= ~SSB_IMSTATE_REJECT;
277 brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
278 regdata, NULL);
279 }
280 }
281
282 /* leave reset and reject asserted */
283 brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
284 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
285 udelay(1);
286}
287
288static void
289brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
290 struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
291 u32 in_resetbits)
292{
293 u8 idx;
294 u32 regdata;
295 u32 wrapbase;
296
297 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
298 if (idx == BRCMF_MAX_CORENUM)
299 return;
300
301 wrapbase = ci->c_inf[idx].wrapbase;
302
303 /* if core is already in reset, just return */
304 regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
305 if ((regdata & BCMA_RESET_CTL_RESET) != 0)
306 return;
307
308 /* configure reset */
309 brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
310 BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
311 regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
312
313 /* put in reset */
314 brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
315 BCMA_RESET_CTL_RESET, NULL);
316 usleep_range(10, 20);
317
318 /* wait till reset is 1 */
319 SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
320 BCMA_RESET_CTL_RESET, 300);
321
322 /* post reset configure */
323 brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
324 BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
325 regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
326}
327
/* Reset a core on a Sonics SiliconBackplane (SB) chip and leave it
 * running with its clock enabled.
 *
 * @sdiodev: SDIO device used for backplane register access
 * @ci: chip descriptor holding the core table (c_inf)
 * @coreid: BCMA_CORE_* identifier of the core to reset
 * @pre_resetbits: core-specific bits applied during the disable phase
 * @in_resetbits: core-specific bits applied while in reset
 * @post_resetbits: unused on the SB path (AI counterpart uses it)
 *
 * The register sequence (disable -> reset+clock -> clear serror ->
 * release reset -> leave clock on) is order-critical; each write is
 * followed by a read-back and a 1us delay to let it propagate over
 * the backplane.
 */
static void
brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
			struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
			u32 in_resetbits, u32 post_resetbits)
{
	u32 regdata;
	u8 idx;

	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
	/* core not present on this chip: nothing to do */
	if (idx == BRCMF_MAX_CORENUM)
		return;

	/*
	 * Must do the disable sequence first to work for
	 * arbitrary current core state.
	 */
	brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
				  in_resetbits);

	/*
	 * Now do the initialization sequence.
	 * set reset while enabling the clock and
	 * forcing them on throughout the core
	 */
	brcmf_sdiod_regwl(sdiodev,
			  CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
			  SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
			  NULL);
	/* read back to flush the posted write before the delay */
	regdata = brcmf_sdiod_regrl(sdiodev,
				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
				    NULL);
	udelay(1);

	/* clear any serror */
	regdata = brcmf_sdiod_regrl(sdiodev,
				    CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
				    NULL);
	if (regdata & SSB_TMSHIGH_SERR)
		brcmf_sdiod_regwl(sdiodev,
				  CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
				  0, NULL);

	/* clear any pending in-band error / timeout indications */
	regdata = brcmf_sdiod_regrl(sdiodev,
				    CORE_SB(ci->c_inf[idx].base, sbimstate),
				    NULL);
	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
		brcmf_sdiod_regwl(sdiodev,
				  CORE_SB(ci->c_inf[idx].base, sbimstate),
				  regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
				  NULL);

	/* clear reset and allow it to propagate throughout the core */
	brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
			  SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
	regdata = brcmf_sdiod_regrl(sdiodev,
				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
				    NULL);
	udelay(1);

	/* leave clock enabled */
	brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
			  SSB_TMSLOW_CLOCK, NULL);
	regdata = brcmf_sdiod_regrl(sdiodev,
				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
				    NULL);
	udelay(1);
}
395
/* Reset a core on an AXI interconnect (AI) chip and bring it up with
 * its clock enabled.
 *
 * @sdiodev: SDIO device used for backplane register access
 * @ci: chip descriptor holding the core table (c_inf)
 * @coreid: BCMA_CORE_* identifier of the core to reset
 * @pre_resetbits: BCMA_IOCTL bits applied during the disable phase
 * @in_resetbits: BCMA_IOCTL bits applied while the core is in reset
 * @post_resetbits: BCMA_IOCTL bits kept set when releasing the core
 */
static void
brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
			struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
			u32 in_resetbits, u32 post_resetbits)
{
	u8 idx;
	u32 regdata;
	u32 wrapbase;

	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
	/* core not present on this chip: nothing to do */
	if (idx == BRCMF_MAX_CORENUM)
		return;

	wrapbase = ci->c_inf[idx].wrapbase;

	/* must disable first to work for arbitrary current core state */
	brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
				  in_resetbits);

	/* deassert reset, re-writing until the wrapper reports it clear.
	 * NOTE(review): this loop has no iteration bound; a core that
	 * never leaves reset would spin here forever — consider a retry
	 * limit.
	 */
	while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
	       BCMA_RESET_CTL_RESET) {
		brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
		usleep_range(40, 60);
	}

	/* enable the clock together with the caller's post-reset bits */
	brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
			  BCMA_IOCTL_CLK, NULL);
	/* read back to make sure the write reached the wrapper */
	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
425
#ifdef DEBUG
/* Sanity-check the recognized chip info: an ARM CM3 core must come
 * with internal memory, and an ARM CR4 core must have a RAM base.
 * Returns 0 when consistent, a negative errno otherwise.
 */
static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
	u8 idx;

	/* ARM CM3 chips keep firmware in a separate SOCRAM core */
	idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
	if (idx != BRCMF_MAX_CORENUM &&
	    brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_INTERNAL_MEM) ==
	    BRCMF_MAX_CORENUM) {
		brcmf_err("RAM core not provided with ARM CM3 core\n");
		return -ENODEV;
	}

	/* ARM CR4 chips need a known RAM base address */
	idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
	if (idx != BRCMF_MAX_CORENUM && ci->rambase == 0) {
		brcmf_err("RAM base not provided with ARM CR4 core\n");
		return -ENOMEM;
	}

	return 0;
}
#else	/* DEBUG */
static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
	return 0;
}
#endif
460
/* Identify the attached chip and populate @ci: chip id/rev, backplane
 * type callbacks, the core address table (c_inf) and RAM geometry.
 *
 * Returns 0 on success, -ENODEV for unsupported chips or backplane
 * types (after a consistency check via brcmf_sdio_chip_cichk()).
 */
static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
				       struct brcmf_chip *ci)
{
	u32 regdata;
	u32 socitype;

	/* Get CC core rev
	 * Chipid is assumed to be at offset 0 from SI_ENUM_BASE.
	 * For different chiptypes or old sdio hosts w/o chipcommon,
	 * other ways of recognition should be added here.
	 */
	regdata = brcmf_sdiod_regrl(sdiodev,
				    CORE_CC_REG(SI_ENUM_BASE, chipid),
				    NULL);
	ci->chip = regdata & CID_ID_MASK;
	ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
	/* the 4335 and 4339 share one SDIO device id (per the macro
	 * name); rev >= 2 of that device identifies as a 4339
	 */
	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
	    ci->chiprev >= 2)
		ci->chip = BCM4339_CHIP_ID;
	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;

	brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
		  socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);

	if (socitype == SOCI_SB) {
		/* Sonics SiliconBackplane: only the 4329 is supported */
		if (ci->chip != BCM4329_CHIP_ID) {
			brcmf_err("SB chip is not supported\n");
			return -ENODEV;
		}
		ci->iscoreup = brcmf_sdio_sb_iscoreup;
		ci->corerev = brcmf_sdio_sb_corerev;
		ci->coredisable = brcmf_sdio_sb_coredisable;
		ci->resetcore = brcmf_sdio_sb_resetcore;

		/* fixed core layout of the 4329 */
		ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
		ci->c_inf[0].base = SI_ENUM_BASE;
		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
		ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
		ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
		ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
		ci->c_inf[4].id = BCMA_CORE_80211;
		ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
		ci->ramsize = BCM4329_RAMSIZE;
	} else if (socitype == SOCI_AI) {
		ci->iscoreup = brcmf_sdio_ai_iscoreup;
		ci->corerev = brcmf_sdio_ai_corerev;
		ci->coredisable = brcmf_sdio_ai_coredisable;
		ci->resetcore = brcmf_sdio_ai_resetcore;

		ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
		ci->c_inf[0].base = SI_ENUM_BASE;

		/* Address of cores for new chips should be added here */
		switch (ci->chip) {
		case BCM43143_CHIP_ID:
			ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
			ci->c_inf[0].cib = 0x2b000000;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
			ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
			ci->c_inf[1].cib = 0x18000000;
			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
			ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
			ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
			ci->c_inf[2].cib = 0x14000000;
			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
			ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
			ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
			ci->c_inf[3].cib = 0x07000000;
			ci->c_inf[4].id = BCMA_CORE_80211;
			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
			ci->ramsize = BCM43143_RAMSIZE;
			break;
		case BCM43241_CHIP_ID:
			ci->c_inf[0].wrapbase = 0x18100000;
			ci->c_inf[0].cib = 0x2a084411;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = 0x18002000;
			ci->c_inf[1].wrapbase = 0x18102000;
			ci->c_inf[1].cib = 0x0e004211;
			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
			ci->c_inf[2].base = 0x18004000;
			ci->c_inf[2].wrapbase = 0x18104000;
			ci->c_inf[2].cib = 0x14080401;
			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
			ci->c_inf[3].base = 0x18003000;
			ci->c_inf[3].wrapbase = 0x18103000;
			ci->c_inf[3].cib = 0x07004211;
			ci->c_inf[4].id = BCMA_CORE_80211;
			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
			ci->ramsize = 0x90000;
			break;
		case BCM4330_CHIP_ID:
			ci->c_inf[0].wrapbase = 0x18100000;
			ci->c_inf[0].cib = 0x27004211;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = 0x18002000;
			ci->c_inf[1].wrapbase = 0x18102000;
			ci->c_inf[1].cib = 0x07004211;
			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
			ci->c_inf[2].base = 0x18004000;
			ci->c_inf[2].wrapbase = 0x18104000;
			ci->c_inf[2].cib = 0x0d080401;
			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
			ci->c_inf[3].base = 0x18003000;
			ci->c_inf[3].wrapbase = 0x18103000;
			ci->c_inf[3].cib = 0x03004211;
			ci->c_inf[4].id = BCMA_CORE_80211;
			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
			ci->ramsize = 0x48000;
			break;
		case BCM4334_CHIP_ID:
			ci->c_inf[0].wrapbase = 0x18100000;
			ci->c_inf[0].cib = 0x29004211;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = 0x18002000;
			ci->c_inf[1].wrapbase = 0x18102000;
			ci->c_inf[1].cib = 0x0d004211;
			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
			ci->c_inf[2].base = 0x18004000;
			ci->c_inf[2].wrapbase = 0x18104000;
			ci->c_inf[2].cib = 0x13080401;
			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
			ci->c_inf[3].base = 0x18003000;
			ci->c_inf[3].wrapbase = 0x18103000;
			ci->c_inf[3].cib = 0x07004211;
			ci->c_inf[4].id = BCMA_CORE_80211;
			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
			ci->ramsize = 0x80000;
			break;
		case BCM4335_CHIP_ID:
			/* CR4-based chip: no SOCRAM core, RAM base instead */
			ci->c_inf[0].wrapbase = 0x18100000;
			ci->c_inf[0].cib = 0x2b084411;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = 0x18005000;
			ci->c_inf[1].wrapbase = 0x18105000;
			ci->c_inf[1].cib = 0x0f004211;
			ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
			ci->c_inf[2].base = 0x18002000;
			ci->c_inf[2].wrapbase = 0x18102000;
			ci->c_inf[2].cib = 0x01084411;
			ci->c_inf[3].id = BCMA_CORE_80211;
			ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
			ci->ramsize = 0xc0000;
			ci->rambase = 0x180000;
			break;
		case BCM43362_CHIP_ID:
			ci->c_inf[0].wrapbase = 0x18100000;
			ci->c_inf[0].cib = 0x27004211;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = 0x18002000;
			ci->c_inf[1].wrapbase = 0x18102000;
			ci->c_inf[1].cib = 0x0a004211;
			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
			ci->c_inf[2].base = 0x18004000;
			ci->c_inf[2].wrapbase = 0x18104000;
			ci->c_inf[2].cib = 0x08080401;
			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
			ci->c_inf[3].base = 0x18003000;
			ci->c_inf[3].wrapbase = 0x18103000;
			ci->c_inf[3].cib = 0x03004211;
			ci->c_inf[4].id = BCMA_CORE_80211;
			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
			ci->ramsize = 0x3C000;
			break;
		case BCM4339_CHIP_ID:
			/* CR4-based chip: no SOCRAM core, RAM base instead */
			ci->c_inf[0].wrapbase = 0x18100000;
			ci->c_inf[0].cib = 0x2e084411;
			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
			ci->c_inf[1].base = 0x18005000;
			ci->c_inf[1].wrapbase = 0x18105000;
			ci->c_inf[1].cib = 0x15004211;
			ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
			ci->c_inf[2].base = 0x18002000;
			ci->c_inf[2].wrapbase = 0x18102000;
			ci->c_inf[2].cib = 0x04084411;
			ci->c_inf[3].id = BCMA_CORE_80211;
			ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
			ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
			ci->ramsize = 0xc0000;
			ci->rambase = 0x180000;
			break;
		default:
			brcmf_err("AXI chip is not supported\n");
			return -ENODEV;
		}
	} else {
		brcmf_err("chip backplane type %u is not supported\n",
			  socitype);
		return -ENODEV;
	}

	return brcmf_sdio_chip_cichk(ci);
}
663
/* Prepare the SDIO bus core for backplane access: request the ALP
 * (always-present low-power) clock, wait for it, then force it on and
 * disable the extra SDIO pull-ups.
 *
 * Returns 0 on success, a negative errno when the clock CSR cannot be
 * written, read back consistently, or the ALP clock never shows up.
 */
static int
brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
{
	int err = 0;
	u8 clkval, clkset;

	/* Try forcing SDIO core to do ALPAvail request only */
	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
	if (err) {
		brcmf_err("error writing for HT off\n");
		return err;
	}

	/* If register supported, wait for ALPAvail and then force ALP */
	/* This may take up to 15 milliseconds */
	clkval = brcmf_sdiod_regrb(sdiodev,
				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);

	/* read-back must match the write, ignoring the hardware-owned
	 * availability status bits
	 */
	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
			  clkset, clkval);
		return -EACCES;
	}

	/* poll the CSR until the ALP clock reports available */
	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
			!SBSDIO_ALPAV(clkval)),
			PMU_MAX_TRANSITION_DLY);
	if (!SBSDIO_ALPAV(clkval)) {
		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
			  clkval);
		return -EBUSY;
	}

	/* ALP is up: force it on.
	 * NOTE(review): err from this write is never checked before
	 * returning success — confirm this is intentional best-effort.
	 */
	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
	udelay(65);

	/* Also, disable the extra SDIO pull-ups */
	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);

	return 0;
}
708
/* Read core revisions and PMU capabilities into @ci and make sure no
 * on-chip ARM is running before firmware download.
 */
static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
			     struct brcmf_chip *ci)
{
	/* c_inf[0] is the chipcommon core by convention */
	u32 base = ci->c_inf[0].base;

	/* get chipcommon rev */
	ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);

	/* get chipcommon capabilites */
	ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
					      CORE_CC_REG(base, capabilities),
					      NULL);

	/* get pmu caps & rev, only meaningful when a PMU is present */
	if (ci->c_inf[0].caps & CC_CAP_PMU) {
		ci->pmucaps =
			brcmf_sdiod_regrl(sdiodev,
					  CORE_CC_REG(base, pmucapabilities),
					  NULL);
		ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
	}

	/* c_inf[1] is the SDIO bus core by convention */
	ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);

	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
		  ci->c_inf[0].rev, ci->pmurev,
		  ci->c_inf[1].rev, ci->c_inf[1].id);

	/*
	 * Make sure any on-chip ARM is off (in case strapping is wrong),
	 * or downloaded code was already running.
	 */
	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
}
744
745int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
746 struct brcmf_chip **ci_ptr)
747{
748 int ret;
749 struct brcmf_chip *ci;
750
751 brcmf_dbg(TRACE, "Enter\n");
752
753 ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
754 if (!ci)
755 return -ENOMEM;
756
757 ret = brcmf_sdio_chip_buscoreprep(sdiodev);
758 if (ret != 0)
759 goto err;
760
761 ret = brcmf_sdio_chip_recognition(sdiodev, ci);
762 if (ret != 0)
763 goto err;
764
765 brcmf_sdio_chip_buscoresetup(sdiodev, ci);
766
767 brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
768 0, NULL);
769 brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
770 0, NULL);
771
772 *ci_ptr = ci;
773 return 0;
774
775err:
776 kfree(ci);
777 return ret;
778}
779
780void
781brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
782{
783 brcmf_dbg(TRACE, "Enter\n");
784
785 kfree(*ci_ptr);
786 *ci_ptr = NULL;
787}
788
789static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
790{
791 const char *fmt;
792
793 fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
794 snprintf(buf, len, fmt, chipid);
795 return buf;
796}
797
/* Program the SDIO pad drive strength for chips that support it.
 *
 * @drivestrength: requested drive strength in mA; the closest
 * supported value not exceeding the request is selected from a
 * per-(chip, pmurev) lookup table and written into the chipcontrol
 * register.  Chips without a PMU, or without a matching table, are
 * left untouched (with an error log for the latter).
 */
void
brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
				  struct brcmf_chip *ci, u32 drivestrength)
{
	const struct sdiod_drive_str *str_tab = NULL;
	u32 str_mask;	/* bit mask of the strength field in chipcontrol */
	u32 str_shift;	/* bit position of the strength field */
	char chn[8];
	u32 base = ci->c_inf[0].base;
	u32 i;
	u32 drivestrength_sel = 0;
	u32 cc_data_temp;
	u32 addr;

	/* drive-strength control is a PMU feature */
	if (!(ci->c_inf[0].caps & CC_CAP_PMU))
		return;

	/* pick the lookup table for this (chip, pmurev) combination */
	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
		str_tab = sdiod_drvstr_tab1_1v8;
		str_mask = 0x00003800;
		str_shift = 11;
		break;
	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
		str_tab = sdiod_drvstr_tab6_1v8;
		str_mask = 0x00001800;
		str_shift = 11;
		break;
	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
		/* note: 43143 does not support tristate */
		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
			str_tab = sdiod_drvstr_tab2_3v3;
			str_mask = 0x00000007;
			str_shift = 0;
		} else
			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
				  brcmf_sdio_chip_name(ci->chip, chn, 8),
				  drivestrength);
		break;
	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
		str_tab = sdiod_drive_strength_tab5_1v8;
		str_mask = 0x00003800;
		str_shift = 11;
		break;
	default:
		brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
			  brcmf_sdio_chip_name(ci->chip, chn, 8),
			  ci->chiprev, ci->pmurev);
		break;
	}

	if (str_tab != NULL) {
		/* first entry with strength <= request wins; assumes the
		 * table is sorted by descending strength and terminated
		 * by a zero-strength entry — TODO confirm against the
		 * sdiod_drvstr_tab* definitions
		 */
		for (i = 0; str_tab[i].strength != 0; i++) {
			if (drivestrength >= str_tab[i].strength) {
				drivestrength_sel = str_tab[i].sel;
				break;
			}
		}
		addr = CORE_CC_REG(base, chipcontrol_addr);
		/* select chipcontrol register 1 via the indirect address
		 * register.  NOTE(review): the read-modify-write below
		 * also targets 'addr' itself — presumably the hardware
		 * routes these accesses through the selected chipcontrol
		 * slot; confirm against chipcommon documentation.
		 */
		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
		cc_data_temp &= ~str_mask;
		drivestrength_sel <<= str_shift;
		cc_data_temp |= drivestrength_sel;
		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);

		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
			  str_tab[i].strength, drivestrength, cc_data_temp);
	}
}
869
/* Put an ARM CM3 based chip into firmware-download state: halt the
 * CM3, reset the D11 MAC (keeping its PHY clock running) and reset
 * the SOCRAM core so RAM is accessible for the download.  The call
 * order is deliberate — the CPU must be stopped before RAM is touched.
 */
static void
brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
			    struct brcmf_chip *ci)
{
	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
	ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
		      D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
		      D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
	ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
}
880
881static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
882 struct brcmf_chip *ci)
883{
884 u8 core_idx;
885 u32 reg_addr;
886
887 if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
888 brcmf_err("SOCRAM core is down after reset?\n");
889 return false;
890 }
891
892 /* clear all interrupts */
893 core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
894 reg_addr = ci->c_inf[core_idx].base;
895 reg_addr += offsetof(struct sdpcmd_regs, intstatus);
896 brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
897
898 ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
899
900 return true;
901}
902
/* Put an ARM CR4 based chip into firmware-download state: reset the
 * CR4 with its CPU halted throughout, then reset the D11 MAC keeping
 * the PHY clock running.  No-op on chips without a CR4 core.
 */
static inline void
brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
			    struct brcmf_chip *ci)
{
	u8 idx;
	u32 regdata;
	u32 wrapbase;
	idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);

	if (idx == BRCMF_MAX_CORENUM)
		return;

	wrapbase = ci->c_inf[idx].wrapbase;
	/* keep only the current CPUHALT bit from the wrapper IOCTL */
	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
	regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
	/* reset the CR4 with CPUHALT asserted before, during and after */
	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
		      ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
	/* reset the D11 MAC, keeping the PHY clock enabled */
	ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
		      D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
		      D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
}
924
/* Leave download state on an ARM CR4 based chip: clear pending bus
 * interrupts, place the firmware reset vector at RAM address 0 and
 * release CPUHALT so the CR4 starts executing.
 *
 * @rstvec: reset vector (first word of the downloaded firmware image)
 */
static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
				       struct brcmf_chip *ci, u32 rstvec)
{
	u8 core_idx;
	u32 reg_addr;

	/* clear all interrupts */
	core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
	reg_addr = ci->c_inf[core_idx].base;
	reg_addr += offsetof(struct sdpcmd_regs, intstatus);
	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);

	/* Write reset vector to address 0.
	 * NOTE(review): the return value of brcmf_sdiod_ramrw() is
	 * ignored — a failed write would release the CPU without a
	 * valid vector; consider propagating the error.
	 */
	brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
			  sizeof(rstvec));

	/* restore ARM: drop CPUHALT so firmware starts executing */
	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
		      0, 0);

	/* always reports success; no failure is detectable above */
	return true;
}
947
948void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
949 struct brcmf_chip *ci)
950{
951 u8 arm_core_idx;
952
953 arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
954 if (BRCMF_MAX_CORENUM != arm_core_idx) {
955 brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
956 return;
957 }
958
959 brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
960}
961
962bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
963 struct brcmf_chip *ci, u32 rstvec)
964{
965 u8 arm_core_idx;
966
967 arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
968 if (BRCMF_MAX_CORENUM != arm_core_idx)
969 return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
970
971 return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
972}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
deleted file mode 100644
index fb0614329ede..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ /dev/null
@@ -1,231 +0,0 @@
1/*
2 * Copyright (c) 2011 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _BRCMFMAC_SDIO_CHIP_H_
18#define _BRCMFMAC_SDIO_CHIP_H_
19
20/*
21 * Core reg address translation.
22 * Both macro's returns a 32 bits byte address on the backplane bus.
23 */
24#define CORE_CC_REG(base, field) \
25 (base + offsetof(struct chipcregs, field))
26#define CORE_BUS_REG(base, field) \
27 (base + offsetof(struct sdpcmd_regs, field))
28#define CORE_SB(base, field) \
29 (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
30
31/* SDIO function 1 register CHIPCLKCSR */
32/* Force ALP request to backplane */
33#define SBSDIO_FORCE_ALP 0x01
34/* Force HT request to backplane */
35#define SBSDIO_FORCE_HT 0x02
36/* Force ILP request to backplane */
37#define SBSDIO_FORCE_ILP 0x04
38/* Make ALP ready (power up xtal) */
39#define SBSDIO_ALP_AVAIL_REQ 0x08
40/* Make HT ready (power up PLL) */
41#define SBSDIO_HT_AVAIL_REQ 0x10
42/* Squelch clock requests from HW */
43#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
44/* Status: ALP is ready */
45#define SBSDIO_ALP_AVAIL 0x40
46/* Status: HT is ready */
47#define SBSDIO_HT_AVAIL 0x80
48#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
49#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
50#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
51#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
52#define SBSDIO_CLKAV(regval, alponly) \
53 (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
54
55#define BRCMF_MAX_CORENUM 6
56
/* Per-core information recorded during chip recognition. */
struct brcmf_core {
	u16 id;		/* BCMA_CORE_* identifier of this core */
	u16 rev;	/* core hardware revision (read via corerev cb) */
	u32 base;	/* backplane base address of the core registers */
	u32 wrapbase;	/* AXI wrapper base address (AI chips only) */
	u32 caps;	/* capabilities register (chipcommon core only) */
	u32 cib;	/* per-core constant set during recognition; not
			 * read anywhere in this file — presumably the
			 * core info block word; confirm before use */
};
65
/* Chip descriptor built by brcmf_sdio_chip_attach(): identification,
 * core table and backplane-specific operation callbacks.
 */
struct brcmf_chip {
	u32 chip;	/* chip identifier (BCM*_CHIP_ID) */
	u32 chiprev;	/* chip revision */
	/* core info */
	/* always put chipcommon core at 0, bus core at 1 */
	struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
	u32 pmurev;	/* PMU revision; valid only when CC_CAP_PMU set */
	u32 pmucaps;	/* raw PMU capabilities register value */
	u32 ramsize;	/* size of on-chip RAM available for firmware */
	u32 rambase;	/* RAM base address (ARM CR4 chips only) */
	u32 rst_vec;	/* reset vector for ARM CR4 core */

	/* backplane-type specific callbacks, bound during recognition
	 * to either the SB (Sonics) or AI (AXI) implementations
	 */
	bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
			 u16 coreid);
	u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
		       u16 coreid);
	void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
			    struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
			    u32 in_resetbits);
	void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
			  struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
			  u32 in_resetbits, u32 post_resetbits);
};
89
90struct sbconfig {
91 u32 PAD[2];
92 u32 sbipsflag; /* initiator port ocp slave flag */
93 u32 PAD[3];
94 u32 sbtpsflag; /* target port ocp slave flag */
95 u32 PAD[11];
96 u32 sbtmerrloga; /* (sonics >= 2.3) */
97 u32 PAD;
98 u32 sbtmerrlog; /* (sonics >= 2.3) */
99 u32 PAD[3];
100 u32 sbadmatch3; /* address match3 */
101 u32 PAD;
102 u32 sbadmatch2; /* address match2 */
103 u32 PAD;
104 u32 sbadmatch1; /* address match1 */
105 u32 PAD[7];
106 u32 sbimstate; /* initiator agent state */
107 u32 sbintvec; /* interrupt mask */
108 u32 sbtmstatelow; /* target state */
109 u32 sbtmstatehigh; /* target state */
110 u32 sbbwa0; /* bandwidth allocation table0 */
111 u32 PAD;
112 u32 sbimconfiglow; /* initiator configuration */
113 u32 sbimconfighigh; /* initiator configuration */
114 u32 sbadmatch0; /* address match0 */
115 u32 PAD;
116 u32 sbtmconfiglow; /* target configuration */
117 u32 sbtmconfighigh; /* target configuration */
118 u32 sbbconfig; /* broadcast configuration */
119 u32 PAD;
120 u32 sbbstate; /* broadcast state */
121 u32 PAD[3];
122 u32 sbactcnfg; /* activate configuration */
123 u32 PAD[3];
124 u32 sbflagst; /* current sbflags */
125 u32 PAD[3];
126 u32 sbidlow; /* identification */
127 u32 sbidhigh; /* identification */
128};
129
130/* sdio core registers */
131struct sdpcmd_regs {
132 u32 corecontrol; /* 0x00, rev8 */
133 u32 corestatus; /* rev8 */
134 u32 PAD[1];
135 u32 biststatus; /* rev8 */
136
137 /* PCMCIA access */
138 u16 pcmciamesportaladdr; /* 0x010, rev8 */
139 u16 PAD[1];
140 u16 pcmciamesportalmask; /* rev8 */
141 u16 PAD[1];
142 u16 pcmciawrframebc; /* rev8 */
143 u16 PAD[1];
144 u16 pcmciaunderflowtimer; /* rev8 */
145 u16 PAD[1];
146
147 /* interrupt */
148 u32 intstatus; /* 0x020, rev8 */
149 u32 hostintmask; /* rev8 */
150 u32 intmask; /* rev8 */
151 u32 sbintstatus; /* rev8 */
152 u32 sbintmask; /* rev8 */
153 u32 funcintmask; /* rev4 */
154 u32 PAD[2];
155 u32 tosbmailbox; /* 0x040, rev8 */
156 u32 tohostmailbox; /* rev8 */
157 u32 tosbmailboxdata; /* rev8 */
158 u32 tohostmailboxdata; /* rev8 */
159
160 /* synchronized access to registers in SDIO clock domain */
161 u32 sdioaccess; /* 0x050, rev8 */
162 u32 PAD[3];
163
164 /* PCMCIA frame control */
165 u8 pcmciaframectrl; /* 0x060, rev8 */
166 u8 PAD[3];
167 u8 pcmciawatermark; /* rev8 */
168 u8 PAD[155];
169
170 /* interrupt batching control */
171 u32 intrcvlazy; /* 0x100, rev8 */
172 u32 PAD[3];
173
174 /* counters */
175 u32 cmd52rd; /* 0x110, rev8 */
176 u32 cmd52wr; /* rev8 */
177 u32 cmd53rd; /* rev8 */
178 u32 cmd53wr; /* rev8 */
179 u32 abort; /* rev8 */
180 u32 datacrcerror; /* rev8 */
181 u32 rdoutofsync; /* rev8 */
182 u32 wroutofsync; /* rev8 */
183 u32 writebusy; /* rev8 */
184 u32 readwait; /* rev8 */
185 u32 readterm; /* rev8 */
186 u32 writeterm; /* rev8 */
187 u32 PAD[40];
188 u32 clockctlstatus; /* rev8 */
189 u32 PAD[7];
190
191 u32 PAD[128]; /* DMA engines */
192
193 /* SDIO/PCMCIA CIS region */
194 char cis[512]; /* 0x400-0x5ff, rev6 */
195
196 /* PCMCIA function control registers */
197 char pcmciafcr[256]; /* 0x600-6ff, rev6 */
198 u16 PAD[55];
199
200 /* PCMCIA backplane access */
201 u16 backplanecsr; /* 0x76E, rev6 */
202 u16 backplaneaddr0; /* rev6 */
203 u16 backplaneaddr1; /* rev6 */
204 u16 backplaneaddr2; /* rev6 */
205 u16 backplaneaddr3; /* rev6 */
206 u16 backplanedata0; /* rev6 */
207 u16 backplanedata1; /* rev6 */
208 u16 backplanedata2; /* rev6 */
209 u16 backplanedata3; /* rev6 */
210 u16 PAD[31];
211
212 /* sprom "size" & "blank" info */
213 u16 spromstatus; /* 0x7BE, rev2 */
214 u32 PAD[464];
215
216 u16 PAD[0x80];
217};
218
219int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
220 struct brcmf_chip **ci_ptr);
221void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
222void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
223 struct brcmf_chip *ci,
224 u32 drivestrength);
225u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
226void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
227 struct brcmf_chip *ci);
228bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
229 struct brcmf_chip *ci, u32 rstvec);
230
231#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 092e9c824992..3deab7959a0d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -180,6 +180,97 @@ struct brcmf_sdio_dev {
180 uint max_request_size; 180 uint max_request_size;
181 ushort max_segment_count; 181 ushort max_segment_count;
182 uint max_segment_size; 182 uint max_segment_size;
183 uint txglomsz;
184 struct sg_table sgtable;
185};
186
187/* sdio core registers */
188struct sdpcmd_regs {
189 u32 corecontrol; /* 0x00, rev8 */
190 u32 corestatus; /* rev8 */
191 u32 PAD[1];
192 u32 biststatus; /* rev8 */
193
194 /* PCMCIA access */
195 u16 pcmciamesportaladdr; /* 0x010, rev8 */
196 u16 PAD[1];
197 u16 pcmciamesportalmask; /* rev8 */
198 u16 PAD[1];
199 u16 pcmciawrframebc; /* rev8 */
200 u16 PAD[1];
201 u16 pcmciaunderflowtimer; /* rev8 */
202 u16 PAD[1];
203
204 /* interrupt */
205 u32 intstatus; /* 0x020, rev8 */
206 u32 hostintmask; /* rev8 */
207 u32 intmask; /* rev8 */
208 u32 sbintstatus; /* rev8 */
209 u32 sbintmask; /* rev8 */
210 u32 funcintmask; /* rev4 */
211 u32 PAD[2];
212 u32 tosbmailbox; /* 0x040, rev8 */
213 u32 tohostmailbox; /* rev8 */
214 u32 tosbmailboxdata; /* rev8 */
215 u32 tohostmailboxdata; /* rev8 */
216
217 /* synchronized access to registers in SDIO clock domain */
218 u32 sdioaccess; /* 0x050, rev8 */
219 u32 PAD[3];
220
221 /* PCMCIA frame control */
222 u8 pcmciaframectrl; /* 0x060, rev8 */
223 u8 PAD[3];
224 u8 pcmciawatermark; /* rev8 */
225 u8 PAD[155];
226
227 /* interrupt batching control */
228 u32 intrcvlazy; /* 0x100, rev8 */
229 u32 PAD[3];
230
231 /* counters */
232 u32 cmd52rd; /* 0x110, rev8 */
233 u32 cmd52wr; /* rev8 */
234 u32 cmd53rd; /* rev8 */
235 u32 cmd53wr; /* rev8 */
236 u32 abort; /* rev8 */
237 u32 datacrcerror; /* rev8 */
238 u32 rdoutofsync; /* rev8 */
239 u32 wroutofsync; /* rev8 */
240 u32 writebusy; /* rev8 */
241 u32 readwait; /* rev8 */
242 u32 readterm; /* rev8 */
243 u32 writeterm; /* rev8 */
244 u32 PAD[40];
245 u32 clockctlstatus; /* rev8 */
246 u32 PAD[7];
247
248 u32 PAD[128]; /* DMA engines */
249
250 /* SDIO/PCMCIA CIS region */
251 char cis[512]; /* 0x400-0x5ff, rev6 */
252
253 /* PCMCIA function control registers */
254 char pcmciafcr[256]; /* 0x600-6ff, rev6 */
255 u16 PAD[55];
256
257 /* PCMCIA backplane access */
258 u16 backplanecsr; /* 0x76E, rev6 */
259 u16 backplaneaddr0; /* rev6 */
260 u16 backplaneaddr1; /* rev6 */
261 u16 backplaneaddr2; /* rev6 */
262 u16 backplaneaddr3; /* rev6 */
263 u16 backplanedata0; /* rev6 */
264 u16 backplanedata1; /* rev6 */
265 u16 backplanedata2; /* rev6 */
266 u16 backplanedata3; /* rev6 */
267 u16 PAD[31];
268
269 /* sprom "size" & "blank" info */
270 u16 spromstatus; /* 0x7BE, rev2 */
271 u32 PAD[464];
272
273 u16 PAD[0x80];
183}; 274};
184 275
185/* Register/deregister interrupt handler. */ 276/* Register/deregister interrupt handler. */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d7718a5fa2f0..afb3d15e38ff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/module.h>
21#include <net/cfg80211.h> 22#include <net/cfg80211.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23 24
@@ -190,6 +191,7 @@ static struct ieee80211_supported_band __wl_band_2ghz = {
190 .n_channels = ARRAY_SIZE(__wl_2ghz_channels), 191 .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
191 .bitrates = wl_g_rates, 192 .bitrates = wl_g_rates,
192 .n_bitrates = wl_g_rates_size, 193 .n_bitrates = wl_g_rates_size,
194 .ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
193}; 195};
194 196
195static struct ieee80211_supported_band __wl_band_5ghz_a = { 197static struct ieee80211_supported_band __wl_band_5ghz_a = {
@@ -251,6 +253,10 @@ struct parsed_vndr_ies {
251 struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT]; 253 struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
252}; 254};
253 255
256static int brcmf_roamoff;
257module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
258MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
259
254/* Quarter dBm units to mW 260/* Quarter dBm units to mW
255 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 261 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
256 * Table is offset so the last entry is largest mW value that fits in 262 * Table is offset so the last entry is largest mW value that fits in
@@ -351,13 +357,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
351 * triples, returning a pointer to the substring whose first element 357 * triples, returning a pointer to the substring whose first element
352 * matches tag 358 * matches tag
353 */ 359 */
354struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) 360const struct brcmf_tlv *
361brcmf_parse_tlvs(const void *buf, int buflen, uint key)
355{ 362{
356 struct brcmf_tlv *elt; 363 const struct brcmf_tlv *elt = buf;
357 int totlen; 364 int totlen = buflen;
358
359 elt = (struct brcmf_tlv *)buf;
360 totlen = buflen;
361 365
362 /* find tagged parameter */ 366 /* find tagged parameter */
363 while (totlen >= TLV_HDR_LEN) { 367 while (totlen >= TLV_HDR_LEN) {
@@ -378,8 +382,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
378 * not update the tlvs buffer pointer/length. 382 * not update the tlvs buffer pointer/length.
379 */ 383 */
380static bool 384static bool
381brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, 385brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
382 u8 *oui, u32 oui_len, u8 type) 386 const u8 *oui, u32 oui_len, u8 type)
383{ 387{
384 /* If the contents match the OUI and the type */ 388 /* If the contents match the OUI and the type */
385 if (ie[TLV_LEN_OFF] >= oui_len + 1 && 389 if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@@ -401,12 +405,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
401} 405}
402 406
403static struct brcmf_vs_tlv * 407static struct brcmf_vs_tlv *
404brcmf_find_wpaie(u8 *parse, u32 len) 408brcmf_find_wpaie(const u8 *parse, u32 len)
405{ 409{
406 struct brcmf_tlv *ie; 410 const struct brcmf_tlv *ie;
407 411
408 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { 412 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
409 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len, 413 if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
410 WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE)) 414 WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
411 return (struct brcmf_vs_tlv *)ie; 415 return (struct brcmf_vs_tlv *)ie;
412 } 416 }
@@ -414,9 +418,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
414} 418}
415 419
416static struct brcmf_vs_tlv * 420static struct brcmf_vs_tlv *
417brcmf_find_wpsie(u8 *parse, u32 len) 421brcmf_find_wpsie(const u8 *parse, u32 len)
418{ 422{
419 struct brcmf_tlv *ie; 423 const struct brcmf_tlv *ie;
420 424
421 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { 425 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
422 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len, 426 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@@ -491,6 +495,19 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
491 return err; 495 return err;
492} 496}
493 497
498static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
499{
500 enum nl80211_iftype iftype;
501
502 iftype = vif->wdev.iftype;
503 return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
504}
505
506static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
507{
508 return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
509}
510
494static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, 511static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
495 const char *name, 512 const char *name,
496 enum nl80211_iftype type, 513 enum nl80211_iftype type,
@@ -651,7 +668,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
651 type); 668 type);
652 return -EOPNOTSUPP; 669 return -EOPNOTSUPP;
653 case NL80211_IFTYPE_ADHOC: 670 case NL80211_IFTYPE_ADHOC:
654 vif->mode = WL_MODE_IBSS;
655 infra = 0; 671 infra = 0;
656 break; 672 break;
657 case NL80211_IFTYPE_STATION: 673 case NL80211_IFTYPE_STATION:
@@ -667,12 +683,10 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
667 */ 683 */
668 return 0; 684 return 0;
669 } 685 }
670 vif->mode = WL_MODE_BSS;
671 infra = 1; 686 infra = 1;
672 break; 687 break;
673 case NL80211_IFTYPE_AP: 688 case NL80211_IFTYPE_AP:
674 case NL80211_IFTYPE_P2P_GO: 689 case NL80211_IFTYPE_P2P_GO:
675 vif->mode = WL_MODE_AP;
676 ap = 1; 690 ap = 1;
677 break; 691 break;
678 default: 692 default:
@@ -696,7 +710,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
696 err = -EAGAIN; 710 err = -EAGAIN;
697 goto done; 711 goto done;
698 } 712 }
699 brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ? 713 brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
700 "Adhoc" : "Infra"); 714 "Adhoc" : "Infra");
701 } 715 }
702 ndev->ieee80211_ptr->iftype = type; 716 ndev->ieee80211_ptr->iftype = type;
@@ -1340,13 +1354,14 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
1340} 1354}
1341 1355
1342static s32 1356static s32
1343brcmf_set_set_cipher(struct net_device *ndev, 1357brcmf_set_wsec_mode(struct net_device *ndev,
1344 struct cfg80211_connect_params *sme) 1358 struct cfg80211_connect_params *sme, bool mfp)
1345{ 1359{
1346 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); 1360 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
1347 struct brcmf_cfg80211_security *sec; 1361 struct brcmf_cfg80211_security *sec;
1348 s32 pval = 0; 1362 s32 pval = 0;
1349 s32 gval = 0; 1363 s32 gval = 0;
1364 s32 wsec;
1350 s32 err = 0; 1365 s32 err = 0;
1351 1366
1352 if (sme->crypto.n_ciphers_pairwise) { 1367 if (sme->crypto.n_ciphers_pairwise) {
@@ -1398,7 +1413,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
1398 if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval && 1413 if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
1399 sme->privacy) 1414 sme->privacy)
1400 pval = AES_ENABLED; 1415 pval = AES_ENABLED;
1401 err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval); 1416
1417 if (mfp)
1418 wsec = pval | gval | MFP_CAPABLE;
1419 else
1420 wsec = pval | gval;
1421 err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
1402 if (err) { 1422 if (err) {
1403 brcmf_err("error (%d)\n", err); 1423 brcmf_err("error (%d)\n", err);
1404 return err; 1424 return err;
@@ -1562,13 +1582,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1562 struct ieee80211_channel *chan = sme->channel; 1582 struct ieee80211_channel *chan = sme->channel;
1563 struct brcmf_join_params join_params; 1583 struct brcmf_join_params join_params;
1564 size_t join_params_size; 1584 size_t join_params_size;
1565 struct brcmf_tlv *rsn_ie; 1585 const struct brcmf_tlv *rsn_ie;
1566 struct brcmf_vs_tlv *wpa_ie; 1586 const struct brcmf_vs_tlv *wpa_ie;
1567 void *ie; 1587 const void *ie;
1568 u32 ie_len; 1588 u32 ie_len;
1569 struct brcmf_ext_join_params_le *ext_join_params; 1589 struct brcmf_ext_join_params_le *ext_join_params;
1570 u16 chanspec; 1590 u16 chanspec;
1571
1572 s32 err = 0; 1591 s32 err = 0;
1573 1592
1574 brcmf_dbg(TRACE, "Enter\n"); 1593 brcmf_dbg(TRACE, "Enter\n");
@@ -1591,7 +1610,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1591 ie_len = wpa_ie->len + TLV_HDR_LEN; 1610 ie_len = wpa_ie->len + TLV_HDR_LEN;
1592 } else { 1611 } else {
1593 /* find the RSN_IE */ 1612 /* find the RSN_IE */
1594 rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len, 1613 rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
1614 sme->ie_len,
1595 WLAN_EID_RSN); 1615 WLAN_EID_RSN);
1596 if (rsn_ie) { 1616 if (rsn_ie) {
1597 ie = rsn_ie; 1617 ie = rsn_ie;
@@ -1636,7 +1656,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1636 goto done; 1656 goto done;
1637 } 1657 }
1638 1658
1639 err = brcmf_set_set_cipher(ndev, sme); 1659 err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
1640 if (err) { 1660 if (err) {
1641 brcmf_err("wl_set_set_cipher failed (%d)\n", err); 1661 brcmf_err("wl_set_set_cipher failed (%d)\n", err);
1642 goto done; 1662 goto done;
@@ -1678,22 +1698,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1678 ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); 1698 ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
1679 memcpy(&ext_join_params->ssid_le.SSID, sme->ssid, 1699 memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
1680 profile->ssid.SSID_len); 1700 profile->ssid.SSID_len);
1681 /*increase dwell time to receive probe response or detect Beacon 1701
1682 * from target AP at a noisy air only during connect command
1683 */
1684 ext_join_params->scan_le.active_time =
1685 cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
1686 ext_join_params->scan_le.passive_time =
1687 cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
1688 /* Set up join scan parameters */ 1702 /* Set up join scan parameters */
1689 ext_join_params->scan_le.scan_type = -1; 1703 ext_join_params->scan_le.scan_type = -1;
1690 /* to sync with presence period of VSDB GO.
1691 * Send probe request more frequently. Probe request will be stopped
1692 * when it gets probe response from target AP/GO.
1693 */
1694 ext_join_params->scan_le.nprobes =
1695 cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
1696 BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
1697 ext_join_params->scan_le.home_time = cpu_to_le32(-1); 1704 ext_join_params->scan_le.home_time = cpu_to_le32(-1);
1698 1705
1699 if (sme->bssid) 1706 if (sme->bssid)
@@ -1706,6 +1713,25 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1706 1713
1707 ext_join_params->assoc_le.chanspec_list[0] = 1714 ext_join_params->assoc_le.chanspec_list[0] =
1708 cpu_to_le16(chanspec); 1715 cpu_to_le16(chanspec);
1716 /* Increase dwell time to receive probe response or detect
1717 * beacon from target AP at a noisy air only during connect
1718 * command.
1719 */
1720 ext_join_params->scan_le.active_time =
1721 cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
1722 ext_join_params->scan_le.passive_time =
1723 cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
1724 /* To sync with presence period of VSDB GO send probe request
1725 * more frequently. Probe request will be stopped when it gets
1726 * probe response from target AP/GO.
1727 */
1728 ext_join_params->scan_le.nprobes =
1729 cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
1730 BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
1731 } else {
1732 ext_join_params->scan_le.active_time = cpu_to_le32(-1);
1733 ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
1734 ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
1709 } 1735 }
1710 1736
1711 err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params, 1737 err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
@@ -1913,7 +1939,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1913 brcmf_dbg(CONN, "Setting the key index %d\n", key.index); 1939 brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
1914 memcpy(key.data, params->key, key.len); 1940 memcpy(key.data, params->key, key.len);
1915 1941
1916 if ((ifp->vif->mode != WL_MODE_AP) && 1942 if (!brcmf_is_apmode(ifp->vif) &&
1917 (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { 1943 (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
1918 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); 1944 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
1919 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 1945 memcpy(keybuf, &key.data[24], sizeof(keybuf));
@@ -1981,7 +2007,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1981 if (!check_vif_up(ifp->vif)) 2007 if (!check_vif_up(ifp->vif))
1982 return -EIO; 2008 return -EIO;
1983 2009
1984 if (mac_addr) { 2010 if (mac_addr &&
2011 (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
2012 (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
1985 brcmf_dbg(TRACE, "Exit"); 2013 brcmf_dbg(TRACE, "Exit");
1986 return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params); 2014 return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
1987 } 2015 }
@@ -2010,7 +2038,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2010 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); 2038 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
2011 break; 2039 break;
2012 case WLAN_CIPHER_SUITE_TKIP: 2040 case WLAN_CIPHER_SUITE_TKIP:
2013 if (ifp->vif->mode != WL_MODE_AP) { 2041 if (!brcmf_is_apmode(ifp->vif)) {
2014 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); 2042 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
2015 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 2043 memcpy(keybuf, &key.data[24], sizeof(keybuf));
2016 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 2044 memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
@@ -2164,12 +2192,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2164 s32 err = 0; 2192 s32 err = 0;
2165 u8 *bssid = profile->bssid; 2193 u8 *bssid = profile->bssid;
2166 struct brcmf_sta_info_le sta_info_le; 2194 struct brcmf_sta_info_le sta_info_le;
2195 u32 beacon_period;
2196 u32 dtim_period;
2167 2197
2168 brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); 2198 brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
2169 if (!check_vif_up(ifp->vif)) 2199 if (!check_vif_up(ifp->vif))
2170 return -EIO; 2200 return -EIO;
2171 2201
2172 if (ifp->vif->mode == WL_MODE_AP) { 2202 if (brcmf_is_apmode(ifp->vif)) {
2173 memcpy(&sta_info_le, mac, ETH_ALEN); 2203 memcpy(&sta_info_le, mac, ETH_ALEN);
2174 err = brcmf_fil_iovar_data_get(ifp, "sta_info", 2204 err = brcmf_fil_iovar_data_get(ifp, "sta_info",
2175 &sta_info_le, 2205 &sta_info_le,
@@ -2186,7 +2216,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2186 } 2216 }
2187 brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n", 2217 brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
2188 sinfo->inactive_time, sinfo->connected_time); 2218 sinfo->inactive_time, sinfo->connected_time);
2189 } else if (ifp->vif->mode == WL_MODE_BSS) { 2219 } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
2190 if (memcmp(mac, bssid, ETH_ALEN)) { 2220 if (memcmp(mac, bssid, ETH_ALEN)) {
2191 brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n", 2221 brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
2192 mac, bssid); 2222 mac, bssid);
@@ -2218,6 +2248,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2218 sinfo->signal = rssi; 2248 sinfo->signal = rssi;
2219 brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); 2249 brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
2220 } 2250 }
2251 err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
2252 &beacon_period);
2253 if (err) {
2254 brcmf_err("Could not get beacon period (%d)\n",
2255 err);
2256 goto done;
2257 } else {
2258 sinfo->bss_param.beacon_interval =
2259 beacon_period;
2260 brcmf_dbg(CONN, "Beacon peroid %d\n",
2261 beacon_period);
2262 }
2263 err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
2264 &dtim_period);
2265 if (err) {
2266 brcmf_err("Could not get DTIM period (%d)\n",
2267 err);
2268 goto done;
2269 } else {
2270 sinfo->bss_param.dtim_period = dtim_period;
2271 brcmf_dbg(CONN, "DTIM peroid %d\n",
2272 dtim_period);
2273 }
2274 sinfo->filled |= STATION_INFO_BSS_PARAM;
2221 } 2275 }
2222 } else 2276 } else
2223 err = -EPERM; 2277 err = -EPERM;
@@ -2444,18 +2498,13 @@ CleanUp:
2444 return err; 2498 return err;
2445} 2499}
2446 2500
2447static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
2448{
2449 return vif->mode == WL_MODE_IBSS;
2450}
2451
2452static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, 2501static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
2453 struct brcmf_if *ifp) 2502 struct brcmf_if *ifp)
2454{ 2503{
2455 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev); 2504 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
2456 struct brcmf_bss_info_le *bi; 2505 struct brcmf_bss_info_le *bi;
2457 struct brcmf_ssid *ssid; 2506 struct brcmf_ssid *ssid;
2458 struct brcmf_tlv *tim; 2507 const struct brcmf_tlv *tim;
2459 u16 beacon_interval; 2508 u16 beacon_interval;
2460 u8 dtim_period; 2509 u8 dtim_period;
2461 size_t ie_len; 2510 size_t ie_len;
@@ -3220,8 +3269,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
3220} 3269}
3221 3270
3222static s32 3271static s32
3223brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie, 3272brcmf_configure_wpaie(struct net_device *ndev,
3224 bool is_rsn_ie) 3273 const struct brcmf_vs_tlv *wpa_ie,
3274 bool is_rsn_ie)
3225{ 3275{
3226 struct brcmf_if *ifp = netdev_priv(ndev); 3276 struct brcmf_if *ifp = netdev_priv(ndev);
3227 u32 auth = 0; /* d11 open authentication */ 3277 u32 auth = 0; /* d11 open authentication */
@@ -3707,11 +3757,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3707 s32 ie_offset; 3757 s32 ie_offset;
3708 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 3758 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3709 struct brcmf_if *ifp = netdev_priv(ndev); 3759 struct brcmf_if *ifp = netdev_priv(ndev);
3710 struct brcmf_tlv *ssid_ie; 3760 const struct brcmf_tlv *ssid_ie;
3711 struct brcmf_ssid_le ssid_le; 3761 struct brcmf_ssid_le ssid_le;
3712 s32 err = -EPERM; 3762 s32 err = -EPERM;
3713 struct brcmf_tlv *rsn_ie; 3763 const struct brcmf_tlv *rsn_ie;
3714 struct brcmf_vs_tlv *wpa_ie; 3764 const struct brcmf_vs_tlv *wpa_ie;
3715 struct brcmf_join_params join_params; 3765 struct brcmf_join_params join_params;
3716 enum nl80211_iftype dev_role; 3766 enum nl80211_iftype dev_role;
3717 struct brcmf_fil_bss_enable_le bss_enable; 3767 struct brcmf_fil_bss_enable_le bss_enable;
@@ -4220,32 +4270,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
4220 CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode) 4270 CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
4221}; 4271};
4222 4272
4223static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
4224{
4225 switch (type) {
4226 case NL80211_IFTYPE_AP_VLAN:
4227 case NL80211_IFTYPE_WDS:
4228 case NL80211_IFTYPE_MONITOR:
4229 case NL80211_IFTYPE_MESH_POINT:
4230 return -ENOTSUPP;
4231 case NL80211_IFTYPE_ADHOC:
4232 return WL_MODE_IBSS;
4233 case NL80211_IFTYPE_STATION:
4234 case NL80211_IFTYPE_P2P_CLIENT:
4235 return WL_MODE_BSS;
4236 case NL80211_IFTYPE_AP:
4237 case NL80211_IFTYPE_P2P_GO:
4238 return WL_MODE_AP;
4239 case NL80211_IFTYPE_P2P_DEVICE:
4240 return WL_MODE_P2P;
4241 case NL80211_IFTYPE_UNSPECIFIED:
4242 default:
4243 break;
4244 }
4245
4246 return -EINVAL;
4247}
4248
4249static void brcmf_wiphy_pno_params(struct wiphy *wiphy) 4273static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
4250{ 4274{
4251 /* scheduled scan settings */ 4275 /* scheduled scan settings */
@@ -4370,7 +4394,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
4370 vif->wdev.wiphy = cfg->wiphy; 4394 vif->wdev.wiphy = cfg->wiphy;
4371 vif->wdev.iftype = type; 4395 vif->wdev.iftype = type;
4372 4396
4373 vif->mode = brcmf_nl80211_iftype_to_mode(type);
4374 vif->pm_block = pm_block; 4397 vif->pm_block = pm_block;
4375 vif->roam_off = -1; 4398 vif->roam_off = -1;
4376 4399
@@ -4416,7 +4439,9 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
4416 u32 event = e->event_code; 4439 u32 event = e->event_code;
4417 u16 flags = e->flags; 4440 u16 flags = e->flags;
4418 4441
4419 if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) { 4442 if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
4443 (event == BRCMF_E_DISASSOC_IND) ||
4444 ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
4420 brcmf_dbg(CONN, "Processing link down\n"); 4445 brcmf_dbg(CONN, "Processing link down\n");
4421 return true; 4446 return true;
4422 } 4447 }
@@ -4658,16 +4683,19 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4658 struct brcmf_cfg80211_info *cfg = ifp->drvr->config; 4683 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
4659 struct net_device *ndev = ifp->ndev; 4684 struct net_device *ndev = ifp->ndev;
4660 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 4685 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
4686 struct ieee80211_channel *chan;
4661 s32 err = 0; 4687 s32 err = 0;
4688 u16 reason;
4662 4689
4663 if (ifp->vif->mode == WL_MODE_AP) { 4690 if (brcmf_is_apmode(ifp->vif)) {
4664 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data); 4691 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
4665 } else if (brcmf_is_linkup(e)) { 4692 } else if (brcmf_is_linkup(e)) {
4666 brcmf_dbg(CONN, "Linkup\n"); 4693 brcmf_dbg(CONN, "Linkup\n");
4667 if (brcmf_is_ibssmode(ifp->vif)) { 4694 if (brcmf_is_ibssmode(ifp->vif)) {
4695 chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
4668 memcpy(profile->bssid, e->addr, ETH_ALEN); 4696 memcpy(profile->bssid, e->addr, ETH_ALEN);
4669 wl_inform_ibss(cfg, ndev, e->addr); 4697 wl_inform_ibss(cfg, ndev, e->addr);
4670 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL); 4698 cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
4671 clear_bit(BRCMF_VIF_STATUS_CONNECTING, 4699 clear_bit(BRCMF_VIF_STATUS_CONNECTING,
4672 &ifp->vif->sme_state); 4700 &ifp->vif->sme_state);
4673 set_bit(BRCMF_VIF_STATUS_CONNECTED, 4701 set_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -4679,9 +4707,15 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4679 if (!brcmf_is_ibssmode(ifp->vif)) { 4707 if (!brcmf_is_ibssmode(ifp->vif)) {
4680 brcmf_bss_connect_done(cfg, ndev, e, false); 4708 brcmf_bss_connect_done(cfg, ndev, e, false);
4681 if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, 4709 if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
4682 &ifp->vif->sme_state)) 4710 &ifp->vif->sme_state)) {
4683 cfg80211_disconnected(ndev, 0, NULL, 0, 4711 reason = 0;
4712 if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
4713 (e->event_code == BRCMF_E_DISASSOC_IND)) &&
4714 (e->reason != WLAN_REASON_UNSPECIFIED))
4715 reason = e->reason;
4716 cfg80211_disconnected(ndev, reason, NULL, 0,
4684 GFP_KERNEL); 4717 GFP_KERNEL);
4718 }
4685 } 4719 }
4686 brcmf_link_down(ifp->vif); 4720 brcmf_link_down(ifp->vif);
4687 brcmf_init_prof(ndev_to_prof(ndev)); 4721 brcmf_init_prof(ndev_to_prof(ndev));
@@ -4875,11 +4909,8 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
4875 4909
4876 cfg->scan_request = NULL; 4910 cfg->scan_request = NULL;
4877 cfg->pwr_save = true; 4911 cfg->pwr_save = true;
4878 cfg->roam_on = true; /* roam on & off switch. 4912 cfg->active_scan = true; /* we do active scan per default */
4879 we enable roam per default */ 4913 cfg->dongle_up = false; /* dongle is not up yet */
4880 cfg->active_scan = true; /* we do active scan for
4881 specific scan per default */
4882 cfg->dongle_up = false; /* dongle is not up yet */
4883 err = brcmf_init_priv_mem(cfg); 4914 err = brcmf_init_priv_mem(cfg);
4884 if (err) 4915 if (err)
4885 return err; 4916 return err;
@@ -4904,6 +4935,30 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
4904 mutex_init(&event->vif_event_lock); 4935 mutex_init(&event->vif_event_lock);
4905} 4936}
4906 4937
4938static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
4939{
4940 struct brcmf_fil_bwcap_le band_bwcap;
4941 u32 val;
4942 int err;
4943
4944 /* verify support for bw_cap command */
4945 val = WLC_BAND_5G;
4946 err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
4947
4948 if (!err) {
4949 /* only set 2G bandwidth using bw_cap command */
4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
4953 sizeof(band_bwcap));
4954 } else {
4955 brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
4956 val = WLC_N_BW_40ALL;
4957 err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
4958 }
4959 return err;
4960}
4961
4907struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, 4962struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
4908 struct device *busdev) 4963 struct device *busdev)
4909{ 4964{
@@ -4961,6 +5016,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
4961 goto cfg80211_p2p_attach_out; 5016 goto cfg80211_p2p_attach_out;
4962 } 5017 }
4963 5018
5019 /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
5020 * setup 40MHz in 2GHz band and enable OBSS scanning.
5021 */
5022 if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
5023 IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
5024 err = brcmf_enable_bw40_2g(ifp);
5025 if (!err)
5026 err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
5027 BRCMF_OBSS_COEX_AUTO);
5028 }
5029
4964 err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1); 5030 err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
4965 if (err) { 5031 if (err) {
4966 brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err); 5032 brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
@@ -4999,7 +5065,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
4999} 5065}
5000 5066
5001static s32 5067static s32
5002brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout) 5068brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
5003{ 5069{
5004 s32 err = 0; 5070 s32 err = 0;
5005 __le32 roamtrigger[2]; 5071 __le32 roamtrigger[2];
@@ -5009,7 +5075,7 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
5009 * Setup timeout if Beacons are lost and roam is 5075 * Setup timeout if Beacons are lost and roam is
5010 * off to report link down 5076 * off to report link down
5011 */ 5077 */
5012 if (roamvar) { 5078 if (brcmf_roamoff) {
5013 err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout); 5079 err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
5014 if (err) { 5080 if (err) {
5015 brcmf_err("bcn_timeout error (%d)\n", err); 5081 brcmf_err("bcn_timeout error (%d)\n", err);
@@ -5021,8 +5087,9 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
5021 * Enable/Disable built-in roaming to allow supplicant 5087 * Enable/Disable built-in roaming to allow supplicant
5022 * to take care of roaming 5088 * to take care of roaming
5023 */ 5089 */
5024 brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On"); 5090 brcmf_dbg(INFO, "Internal Roaming = %s\n",
5025 err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar); 5091 brcmf_roamoff ? "Off" : "On");
5092 err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
5026 if (err) { 5093 if (err) {
5027 brcmf_err("roam_off error (%d)\n", err); 5094 brcmf_err("roam_off error (%d)\n", err);
5028 goto dongle_rom_out; 5095 goto dongle_rom_out;
@@ -5164,9 +5231,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5164 ieee80211_channel_to_frequency(ch.chnum, band); 5231 ieee80211_channel_to_frequency(ch.chnum, band);
5165 band_chan_arr[index].hw_value = ch.chnum; 5232 band_chan_arr[index].hw_value = ch.chnum;
5166 5233
5167 brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
5168 ch.chnum, band_chan_arr[index].center_freq,
5169 ch.bw, ch.sb);
5170 if (ch.bw == BRCMU_CHAN_BW_40) { 5234 if (ch.bw == BRCMU_CHAN_BW_40) {
5171 /* assuming the order is HT20, HT40 Upper, 5235 /* assuming the order is HT20, HT40 Upper,
5172 * HT40 lower from chanspecs 5236 * HT40 lower from chanspecs
@@ -5267,6 +5331,8 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5267 u32 band_list[3]; 5331 u32 band_list[3];
5268 u32 nmode; 5332 u32 nmode;
5269 u32 bw_cap[2] = { 0, 0 }; 5333 u32 bw_cap[2] = { 0, 0 };
5334 u32 rxchain;
5335 u32 nchain;
5270 s8 phy; 5336 s8 phy;
5271 s32 err; 5337 s32 err;
5272 u32 nband; 5338 u32 nband;
@@ -5303,6 +5369,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5303 brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode, 5369 brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
5304 bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]); 5370 bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
5305 5371
5372 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
5373 if (err) {
5374 brcmf_err("rxchain error (%d)\n", err);
5375 nchain = 1;
5376 } else {
5377 for (nchain = 0; rxchain; nchain++)
5378 rxchain = rxchain & (rxchain - 1);
5379 }
5380 brcmf_dbg(INFO, "nchain=%d\n", nchain);
5381
5306 err = brcmf_construct_reginfo(cfg, bw_cap); 5382 err = brcmf_construct_reginfo(cfg, bw_cap);
5307 if (err) { 5383 if (err) {
5308 brcmf_err("brcmf_construct_reginfo failed (%d)\n", err); 5384 brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
@@ -5331,10 +5407,7 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5331 band->ht_cap.ht_supported = true; 5407 band->ht_cap.ht_supported = true;
5332 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 5408 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
5333 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; 5409 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
5334 /* An HT shall support all EQM rates for one spatial 5410 memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
5335 * stream
5336 */
5337 band->ht_cap.mcs.rx_mask[0] = 0xff;
5338 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 5411 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
5339 bands[band->band] = band; 5412 bands[band->band] = band;
5340 } 5413 }
@@ -5381,7 +5454,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
5381 brcmf_dbg(INFO, "power save set to %s\n", 5454 brcmf_dbg(INFO, "power save set to %s\n",
5382 (power_mode ? "enabled" : "disabled")); 5455 (power_mode ? "enabled" : "disabled"));
5383 5456
5384 err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT); 5457 err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
5385 if (err) 5458 if (err)
5386 goto default_conf_out; 5459 goto default_conf_out;
5387 err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype, 5460 err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 2dc6a074e8ed..283c525a44f7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -89,21 +89,6 @@ enum brcmf_scan_status {
89 BRCMF_SCAN_STATUS_SUPPRESS, 89 BRCMF_SCAN_STATUS_SUPPRESS,
90}; 90};
91 91
92/**
93 * enum wl_mode - driver mode of virtual interface.
94 *
95 * @WL_MODE_BSS: connects to BSS.
96 * @WL_MODE_IBSS: operate as ad-hoc.
97 * @WL_MODE_AP: operate as access-point.
98 * @WL_MODE_P2P: provide P2P discovery.
99 */
100enum wl_mode {
101 WL_MODE_BSS,
102 WL_MODE_IBSS,
103 WL_MODE_AP,
104 WL_MODE_P2P
105};
106
107/* dongle configuration */ 92/* dongle configuration */
108struct brcmf_cfg80211_conf { 93struct brcmf_cfg80211_conf {
109 u32 frag_threshold; 94 u32 frag_threshold;
@@ -193,7 +178,6 @@ struct vif_saved_ie {
193 * @ifp: lower layer interface pointer 178 * @ifp: lower layer interface pointer
194 * @wdev: wireless device. 179 * @wdev: wireless device.
195 * @profile: profile information. 180 * @profile: profile information.
196 * @mode: operating mode.
197 * @roam_off: roaming state. 181 * @roam_off: roaming state.
198 * @sme_state: SME state using enum brcmf_vif_status bits. 182 * @sme_state: SME state using enum brcmf_vif_status bits.
199 * @pm_block: power-management blocked. 183 * @pm_block: power-management blocked.
@@ -204,7 +188,6 @@ struct brcmf_cfg80211_vif {
204 struct brcmf_if *ifp; 188 struct brcmf_if *ifp;
205 struct wireless_dev wdev; 189 struct wireless_dev wdev;
206 struct brcmf_cfg80211_profile profile; 190 struct brcmf_cfg80211_profile profile;
207 s32 mode;
208 s32 roam_off; 191 s32 roam_off;
209 unsigned long sme_state; 192 unsigned long sme_state;
210 bool pm_block; 193 bool pm_block;
@@ -402,7 +385,6 @@ struct brcmf_cfg80211_info {
402 bool ibss_starter; 385 bool ibss_starter;
403 bool pwr_save; 386 bool pwr_save;
404 bool dongle_up; 387 bool dongle_up;
405 bool roam_on;
406 bool scan_tried; 388 bool scan_tried;
407 u8 *dcmd_buf; 389 u8 *dcmd_buf;
408 u8 *extra_buf; 390 u8 *extra_buf;
@@ -491,7 +473,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
491s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, 473s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
492 const u8 *vndr_ie_buf, u32 vndr_ie_len); 474 const u8 *vndr_ie_buf, u32 vndr_ie_len);
493s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif); 475s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
494struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key); 476const struct brcmf_tlv *
477brcmf_parse_tlvs(const void *buf, int buflen, uint key);
495u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, 478u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
496 struct ieee80211_channel *ch); 479 struct ieee80211_channel *ch);
497u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state); 480u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 925034b80e9c..8c5fa4e58139 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
426 bool blocked; 426 bool blocked;
427 int err; 427 int err;
428 428
429 if (!wl->ucode.bcm43xx_bomminor) {
430 err = brcms_request_fw(wl, wl->wlc->hw->d11core);
431 if (err)
432 return -ENOENT;
433 }
434
429 ieee80211_wake_queues(hw); 435 ieee80211_wake_queues(hw);
430 spin_lock_bh(&wl->lock); 436 spin_lock_bh(&wl->lock);
431 blocked = brcms_rfkill_set_hw_state(wl); 437 blocked = brcms_rfkill_set_hw_state(wl);
@@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
433 if (!blocked) 439 if (!blocked)
434 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); 440 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
435 441
436 if (!wl->ucode.bcm43xx_bomminor) {
437 err = brcms_request_fw(wl, wl->wlc->hw->d11core);
438 if (err) {
439 brcms_remove(wl->wlc->hw->d11core);
440 return -ENOENT;
441 }
442 }
443
444 spin_lock_bh(&wl->lock); 442 spin_lock_bh(&wl->lock);
445 /* avoid acknowledging frames before a non-monitor device is added */ 443 /* avoid acknowledging frames before a non-monitor device is added */
446 wl->mute_tx = true; 444 wl->mute_tx = true;
@@ -1094,12 +1092,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
1094 * Attach to the WL device identified by vendor and device parameters. 1092 * Attach to the WL device identified by vendor and device parameters.
1095 * regs is a host accessible memory address pointing to WL device registers. 1093 * regs is a host accessible memory address pointing to WL device registers.
1096 * 1094 *
1097 * brcms_attach is not defined as static because in the case where no bus
1098 * is defined, wl_attach will never be called, and thus, gcc will issue
1099 * a warning that this function is defined but not used if we declare
1100 * it as static.
1101 *
1102 *
1103 * is called in brcms_bcma_probe() context, therefore no locking required. 1095 * is called in brcms_bcma_probe() context, therefore no locking required.
1104 */ 1096 */
1105static struct brcms_info *brcms_attach(struct bcma_device *pdev) 1097static struct brcms_info *brcms_attach(struct bcma_device *pdev)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 6fa5d4863782..d816270db3be 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -43,5 +43,6 @@
43#define BCM4335_CHIP_ID 0x4335 43#define BCM4335_CHIP_ID 0x4335
44#define BCM43362_CHIP_ID 43362 44#define BCM43362_CHIP_ID 43362
45#define BCM4339_CHIP_ID 0x4339 45#define BCM4339_CHIP_ID 0x4339
46#define BCM4354_CHIP_ID 0x4354
46 47
47#endif /* _BRCM_HW_IDS_H_ */ 48#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 7ca2aa1035b2..74419d4bd123 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -217,6 +217,9 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
217#define WSEC_SWFLAG 0x0008 217#define WSEC_SWFLAG 0x0008
218/* to go into transition mode without setting wep */ 218/* to go into transition mode without setting wep */
219#define SES_OW_ENABLED 0x0040 219#define SES_OW_ENABLED 0x0040
220/* MFP */
221#define MFP_CAPABLE 0x0200
222#define MFP_REQUIRED 0x0400
220 223
221/* WPA authentication mode bitvec */ 224/* WPA authentication mode bitvec */
222#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */ 225#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 5a9ffd3a6a6c..e23d67e0bfe6 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -202,8 +202,8 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
202 } 202 }
203 203
204 /* calculate the block size */ 204 /* calculate the block size */
205 tx_size = block_size = min((size_t)(firmware->size - put), 205 tx_size = block_size = min_t(size_t, firmware->size - put,
206 (size_t)DOWNLOAD_BLOCK_SIZE); 206 DOWNLOAD_BLOCK_SIZE);
207 207
208 memcpy(buf, &firmware->data[put], block_size); 208 memcpy(buf, &firmware->data[put], block_size);
209 if (block_size < DOWNLOAD_BLOCK_SIZE) { 209 if (block_size < DOWNLOAD_BLOCK_SIZE) {
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 9f825f2620da..b6ec51923b20 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -677,6 +677,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
677 PCMCIA_DEVICE_PROD_ID12( 677 PCMCIA_DEVICE_PROD_ID12(
678 "ZoomAir 11Mbps High", "Rate wireless Networking", 678 "ZoomAir 11Mbps High", "Rate wireless Networking",
679 0x273fe3db, 0x32a1eaee), 679 0x273fe3db, 0x32a1eaee),
680 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card",
681 0xa37434e9, 0x9762e8f1),
680 PCMCIA_DEVICE_PROD_ID123( 682 PCMCIA_DEVICE_PROD_ID123(
681 "Pretec", "CompactWLAN Card 802.11b", "2.5", 683 "Pretec", "CompactWLAN Card 802.11b", "2.5",
682 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1), 684 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3aba49259ef1..dfc6dfc56d52 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -7065,7 +7065,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
7065 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 7065 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
7066 return -E2BIG; 7066 return -E2BIG;
7067 7067
7068 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); 7068 wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
7069 memset(priv->nick, 0, sizeof(priv->nick)); 7069 memset(priv->nick, 0, sizeof(priv->nick));
7070 memcpy(priv->nick, extra, wrqu->data.length); 7070 memcpy(priv->nick, extra, wrqu->data.length);
7071 7071
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 139326065bd9..c5aa404069f3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9169,7 +9169,7 @@ static int ipw_wx_set_nick(struct net_device *dev,
9169 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 9169 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9170 return -E2BIG; 9170 return -E2BIG;
9171 mutex_lock(&priv->mutex); 9171 mutex_lock(&priv->mutex);
9172 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); 9172 wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9173 memset(priv->nick, 0, sizeof(priv->nick)); 9173 memset(priv->nick, 0, sizeof(priv->nick));
9174 memcpy(priv->nick, extra, wrqu->data.length); 9174 memcpy(priv->nick, extra, wrqu->data.length);
9175 IPW_DEBUG_TRACE("<<\n"); 9175 IPW_DEBUG_TRACE("<<\n");
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 0487461ae4da..dc1d20cf64ee 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1248,14 +1248,7 @@ il3945_rx_handle(struct il_priv *il)
1248 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; 1248 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
1249 len += sizeof(u32); /* account for status word */ 1249 len += sizeof(u32); /* account for status word */
1250 1250
1251 /* Reclaim a command buffer only if this packet is a response 1251 reclaim = il_need_reclaim(il, pkt);
1252 * to a (driver-originated) command.
1253 * If the packet (e.g. Rx frame) originated from uCode,
1254 * there is no command buffer to reclaim.
1255 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1256 * but apparently a few don't get set; catch them here. */
1257 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1258 pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
1259 1252
1260 /* Based on type of command response or notification, 1253 /* Based on type of command response or notification,
1261 * handle those that need handling via function in 1254 * handle those that need handling via function in
@@ -1495,12 +1488,14 @@ il3945_irq_tasklet(struct il_priv *il)
1495 if (inta & CSR_INT_BIT_WAKEUP) { 1488 if (inta & CSR_INT_BIT_WAKEUP) {
1496 D_ISR("Wakeup interrupt\n"); 1489 D_ISR("Wakeup interrupt\n");
1497 il_rx_queue_update_write_ptr(il, &il->rxq); 1490 il_rx_queue_update_write_ptr(il, &il->rxq);
1491
1492 spin_lock_irqsave(&il->lock, flags);
1498 il_txq_update_write_ptr(il, &il->txq[0]); 1493 il_txq_update_write_ptr(il, &il->txq[0]);
1499 il_txq_update_write_ptr(il, &il->txq[1]); 1494 il_txq_update_write_ptr(il, &il->txq[1]);
1500 il_txq_update_write_ptr(il, &il->txq[2]); 1495 il_txq_update_write_ptr(il, &il->txq[2]);
1501 il_txq_update_write_ptr(il, &il->txq[3]); 1496 il_txq_update_write_ptr(il, &il->txq[3]);
1502 il_txq_update_write_ptr(il, &il->txq[4]); 1497 il_txq_update_write_ptr(il, &il->txq[4]);
1503 il_txq_update_write_ptr(il, &il->txq[5]); 1498 spin_unlock_irqrestore(&il->lock, flags);
1504 1499
1505 il->isr_stats.wakeup++; 1500 il->isr_stats.wakeup++;
1506 handled |= CSR_INT_BIT_WAKEUP; 1501 handled |= CSR_INT_BIT_WAKEUP;
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 9a45f6f626f6..76b0729ade17 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
891{ 891{
892} 892}
893 893
894static struct rate_control_ops rs_ops = { 894static const struct rate_control_ops rs_ops = {
895 .module = NULL,
896 .name = RS_NAME, 895 .name = RS_NAME,
897 .tx_status = il3945_rs_tx_status, 896 .tx_status = il3945_rs_tx_status,
898 .get_rate = il3945_rs_get_rate, 897 .get_rate = il3945_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 43f488a8cda2..888ad5c74639 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -92,7 +92,6 @@ il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
92 * EEPROM 92 * EEPROM
93 */ 93 */
94struct il_mod_params il4965_mod_params = { 94struct il_mod_params il4965_mod_params = {
95 .amsdu_size_8K = 1,
96 .restart_fw = 1, 95 .restart_fw = 1,
97 /* the rest are 0 by default */ 96 /* the rest are 0 by default */
98}; 97};
@@ -4274,17 +4273,7 @@ il4965_rx_handle(struct il_priv *il)
4274 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; 4273 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4275 len += sizeof(u32); /* account for status word */ 4274 len += sizeof(u32); /* account for status word */
4276 4275
4277 /* Reclaim a command buffer only if this packet is a response 4276 reclaim = il_need_reclaim(il, pkt);
4278 * to a (driver-originated) command.
4279 * If the packet (e.g. Rx frame) originated from uCode,
4280 * there is no command buffer to reclaim.
4281 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4282 * but apparently a few don't get set; catch them here. */
4283 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4284 (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
4285 (pkt->hdr.cmd != N_RX_MPDU) &&
4286 (pkt->hdr.cmd != N_COMPRESSED_BA) &&
4287 (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
4288 4277
4289 /* Based on type of command response or notification, 4278 /* Based on type of command response or notification,
4290 * handle those that need handling via function in 4279 * handle those that need handling via function in
@@ -6876,6 +6865,6 @@ module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
6876MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); 6865MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
6877module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 6866module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
6878 S_IRUGO); 6867 S_IRUGO);
6879MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 6868MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
6880module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO); 6869module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
6881MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 6870MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 4d5e33259ca8..eaaeea19d8c5 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
2807{ 2807{
2808} 2808}
2809 2809
2810static struct rate_control_ops rs_4965_ops = { 2810static const struct rate_control_ops rs_4965_ops = {
2811 .module = NULL,
2812 .name = IL4965_RS_NAME, 2811 .name = IL4965_RS_NAME,
2813 .tx_status = il4965_rs_tx_status, 2812 .tx_status = il4965_rs_tx_status,
2814 .get_rate = il4965_rs_get_rate, 2813 .get_rate = il4965_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 048421511988..dd744135c956 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -2270,7 +2270,8 @@ struct il_spectrum_notification {
2270 */ 2270 */
2271#define IL_POWER_VEC_SIZE 5 2271#define IL_POWER_VEC_SIZE 5
2272 2272
2273#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) 2273#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2274#define IL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2274#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) 2275#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2275 2276
2276struct il3945_powertable_cmd { 2277struct il3945_powertable_cmd {
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 02e8233ccf29..4f42174d9994 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1078,29 +1078,82 @@ EXPORT_SYMBOL(il_get_channel_info);
1078 * Setting power level allows the card to go to sleep when not busy. 1078 * Setting power level allows the card to go to sleep when not busy.
1079 * 1079 *
1080 * We calculate a sleep command based on the required latency, which 1080 * We calculate a sleep command based on the required latency, which
1081 * we get from mac80211. In order to handle thermal throttling, we can 1081 * we get from mac80211.
1082 * also use pre-defined power levels.
1083 */ 1082 */
1084 1083
1085/* 1084#define SLP_VEC(X0, X1, X2, X3, X4) { \
1086 * This defines the old power levels. They are still used by default 1085 cpu_to_le32(X0), \
1087 * (level 1) and for thermal throttle (levels 3 through 5) 1086 cpu_to_le32(X1), \
1088 */ 1087 cpu_to_le32(X2), \
1089 1088 cpu_to_le32(X3), \
1090struct il_power_vec_entry { 1089 cpu_to_le32(X4) \
1091 struct il_powertable_cmd cmd; 1090}
1092 u8 no_dtim; /* number of skip dtim */
1093};
1094 1091
1095static void 1092static void
1096il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd) 1093il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
1097{ 1094{
1095 const __le32 interval[3][IL_POWER_VEC_SIZE] = {
1096 SLP_VEC(2, 2, 4, 6, 0xFF),
1097 SLP_VEC(2, 4, 7, 10, 10),
1098 SLP_VEC(4, 7, 10, 10, 0xFF)
1099 };
1100 int i, dtim_period, no_dtim;
1101 u32 max_sleep;
1102 bool skip;
1103
1098 memset(cmd, 0, sizeof(*cmd)); 1104 memset(cmd, 0, sizeof(*cmd));
1099 1105
1100 if (il->power_data.pci_pm) 1106 if (il->power_data.pci_pm)
1101 cmd->flags |= IL_POWER_PCI_PM_MSK; 1107 cmd->flags |= IL_POWER_PCI_PM_MSK;
1102 1108
1103 D_POWER("Sleep command for CAM\n"); 1109 /* if no Power Save, we are done */
1110 if (il->power_data.ps_disabled)
1111 return;
1112
1113 cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
1114 cmd->keep_alive_seconds = 0;
1115 cmd->debug_flags = 0;
1116 cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
1117 cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
1118 cmd->keep_alive_beacons = 0;
1119
1120 dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
1121
1122 if (dtim_period <= 2) {
1123 memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
1124 no_dtim = 2;
1125 } else if (dtim_period <= 10) {
1126 memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
1127 no_dtim = 2;
1128 } else {
1129 memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
1130 no_dtim = 0;
1131 }
1132
1133 if (dtim_period == 0) {
1134 dtim_period = 1;
1135 skip = false;
1136 } else {
1137 skip = !!no_dtim;
1138 }
1139
1140 if (skip) {
1141 __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];
1142
1143 max_sleep = le32_to_cpu(tmp);
1144 if (max_sleep == 0xFF)
1145 max_sleep = dtim_period * (skip + 1);
1146 else if (max_sleep > dtim_period)
1147 max_sleep = (max_sleep / dtim_period) * dtim_period;
1148 cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
1149 } else {
1150 max_sleep = dtim_period;
1151 cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
1152 }
1153
1154 for (i = 0; i < IL_POWER_VEC_SIZE; i++)
1155 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1156 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1104} 1157}
1105 1158
1106static int 1159static int
@@ -1173,7 +1226,8 @@ il_power_update_mode(struct il_priv *il, bool force)
1173{ 1226{
1174 struct il_powertable_cmd cmd; 1227 struct il_powertable_cmd cmd;
1175 1228
1176 il_power_sleep_cam_cmd(il, &cmd); 1229 il_build_powertable_cmd(il, &cmd);
1230
1177 return il_power_set_mode(il, &cmd, force); 1231 return il_power_set_mode(il, &cmd, force);
1178} 1232}
1179EXPORT_SYMBOL(il_power_update_mode); 1233EXPORT_SYMBOL(il_power_update_mode);
@@ -5081,6 +5135,7 @@ set_ch_out:
5081 } 5135 }
5082 5136
5083 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { 5137 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5138 il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
5084 ret = il_power_update_mode(il, false); 5139 ret = il_power_update_mode(il, false);
5085 if (ret) 5140 if (ret)
5086 D_MAC80211("Error setting sleep level\n"); 5141 D_MAC80211("Error setting sleep level\n");
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index ad123d66ab6c..dfb13c70efe8 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1123,6 +1123,7 @@ struct il_power_mgr {
1123 struct il_powertable_cmd sleep_cmd_next; 1123 struct il_powertable_cmd sleep_cmd_next;
1124 int debug_sleep_level_override; 1124 int debug_sleep_level_override;
1125 bool pci_pm; 1125 bool pci_pm;
1126 bool ps_disabled;
1126}; 1127};
1127 1128
1128struct il_priv { 1129struct il_priv {
@@ -1597,7 +1598,7 @@ struct il_mod_params {
1597 int disable_hw_scan; /* def: 0 = use h/w scan */ 1598 int disable_hw_scan; /* def: 0 = use h/w scan */
1598 int num_of_queues; /* def: HW dependent */ 1599 int num_of_queues; /* def: HW dependent */
1599 int disable_11n; /* def: 0 = 11n capabilities enabled */ 1600 int disable_11n; /* def: 0 = 11n capabilities enabled */
1600 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 1601 int amsdu_size_8K; /* def: 0 = disable 8K amsdu size */
1601 int antenna; /* def: 0 = both antennas (use diversity) */ 1602 int antenna; /* def: 0 = both antennas (use diversity) */
1602 int restart_fw; /* def: 1 = restart firmware */ 1603 int restart_fw; /* def: 1 = restart firmware */
1603}; 1604};
@@ -1978,6 +1979,20 @@ void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
1978u32 il_read_targ_mem(struct il_priv *il, u32 addr); 1979u32 il_read_targ_mem(struct il_priv *il, u32 addr);
1979void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val); 1980void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
1980 1981
1982static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
1983{
1984 /* Reclaim a command buffer only if this packet is a response
1985 * to a (driver-originated) command. If the packet (e.g. Rx frame)
1986 * originated from uCode, there is no command buffer to reclaim.
1987 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, but
1988 * apparently a few don't get set; catch them here.
1989 */
1990 return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1991 pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
1992 pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
1993 pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
1994}
1995
1981static inline void 1996static inline void
1982_il_write8(struct il_priv *il, u32 ofs, u8 val) 1997_il_write8(struct il_priv *il, u32 ofs, u8 val)
1983{ 1998{
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 3eb2102ce236..74b3b4de7bb7 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
68comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM" 68comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
69 depends on IWLWIFI && IWLDVM=n && IWLMVM=n 69 depends on IWLWIFI && IWLDVM=n && IWLMVM=n
70 70
71config IWLWIFI_BCAST_FILTERING
72 bool "Enable broadcast filtering"
73 depends on IWLMVM
74 help
75 Say Y here to enable default bcast filtering configuration.
76
77 Enabling broadcast filtering will drop any incoming wireless
78 broadcast frames, except some very specific predefined
79 patterns (e.g. incoming arp requests).
80
81 If unsure, don't enable this option, as some programs might
82 expect incoming broadcasts for their normal operations.
83
71menu "Debugging Options" 84menu "Debugging Options"
72 depends on IWLWIFI 85 depends on IWLWIFI
73 86
@@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
111 Enable use of experimental ucode for testing and debugging. 124 Enable use of experimental ucode for testing and debugging.
112 125
113config IWLWIFI_DEVICE_TRACING 126config IWLWIFI_DEVICE_TRACING
127
114 bool "iwlwifi device access tracing" 128 bool "iwlwifi device access tracing"
115 depends on IWLWIFI 129 depends on IWLWIFI
116 depends on EVENT_TRACING 130 depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1fa64429bcc2..3d32f4120174 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o 8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o 9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
10iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o 10iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
11iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o 11iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
12 12
13iwlwifi-objs += $(iwlwifi-m) 13iwlwifi-objs += $(iwlwifi-m)
14 14
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 562772d85102..c160dad03037 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -109,7 +109,7 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
109 109
110struct iwl_ucode_capabilities; 110struct iwl_ucode_capabilities;
111 111
112extern struct ieee80211_ops iwlagn_hw_ops; 112extern const struct ieee80211_ops iwlagn_hw_ops;
113 113
114static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) 114static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
115{ 115{
@@ -480,7 +480,7 @@ do { \
480} while (0) 480} while (0)
481#endif /* CONFIG_IWLWIFI_DEBUG */ 481#endif /* CONFIG_IWLWIFI_DEBUG */
482 482
483extern const char *iwl_dvm_cmd_strings[REPLY_MAX]; 483extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
484 484
485static inline const char *iwl_dvm_get_cmd_string(u8 cmd) 485static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
486{ 486{
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 7b140e487deb..758c54eeb206 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -317,7 +317,7 @@ static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
317 .nrg_th_cca = 62, 317 .nrg_th_cca = 62,
318}; 318};
319 319
320static struct iwl_sensitivity_ranges iwl5150_sensitivity = { 320static const struct iwl_sensitivity_ranges iwl5150_sensitivity = {
321 .min_nrg_cck = 95, 321 .min_nrg_cck = 95,
322 .auto_corr_min_ofdm = 90, 322 .auto_corr_min_ofdm = 90,
323 .auto_corr_min_ofdm_mrc = 170, 323 .auto_corr_min_ofdm_mrc = 170,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 73086c1629ca..dd55c9cf7ba8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1582,7 +1582,7 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1582 IWL_DEBUG_MAC80211(priv, "leave\n"); 1582 IWL_DEBUG_MAC80211(priv, "leave\n");
1583} 1583}
1584 1584
1585struct ieee80211_ops iwlagn_hw_ops = { 1585const struct ieee80211_ops iwlagn_hw_ops = {
1586 .tx = iwlagn_mac_tx, 1586 .tx = iwlagn_mac_tx,
1587 .start = iwlagn_mac_start, 1587 .start = iwlagn_mac_start,
1588 .stop = iwlagn_mac_stop, 1588 .stop = iwlagn_mac_stop,
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ba1b1ea54252..6a6df71af1d7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
252 struct iwl_priv *priv = 252 struct iwl_priv *priv =
253 container_of(work, struct iwl_priv, bt_runtime_config); 253 container_of(work, struct iwl_priv, bt_runtime_config);
254 254
255 mutex_lock(&priv->mutex);
255 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 256 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
256 return; 257 goto out;
257 258
258 /* dont send host command if rf-kill is on */ 259 /* dont send host command if rf-kill is on */
259 if (!iwl_is_ready_rf(priv)) 260 if (!iwl_is_ready_rf(priv))
260 return; 261 goto out;
262
261 iwlagn_send_advance_bt_config(priv); 263 iwlagn_send_advance_bt_config(priv);
264out:
265 mutex_unlock(&priv->mutex);
262} 266}
263 267
264static void iwl_bg_bt_full_concurrency(struct work_struct *work) 268static void iwl_bg_bt_full_concurrency(struct work_struct *work)
@@ -2035,7 +2039,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2035 ieee80211_free_txskb(priv->hw, skb); 2039 ieee80211_free_txskb(priv->hw, skb);
2036} 2040}
2037 2041
2038static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 2042static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2039{ 2043{
2040 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2044 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2041 2045
@@ -2045,6 +2049,8 @@ static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2045 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2049 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2046 2050
2047 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); 2051 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2052
2053 return false;
2048} 2054}
2049 2055
2050static const struct iwl_op_mode_ops iwl_dvm_ops = { 2056static const struct iwl_op_mode_ops iwl_dvm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 0977d93b529d..aa773a2da4ab 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
176 * (2.4 GHz) band. 176 * (2.4 GHz) band.
177 */ 177 */
178 178
179static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { 179static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
180 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 180 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
181}; 181};
182 182
183static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { 183static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
184 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */ 184 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
185 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */ 185 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
186 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */ 186 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
187 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */ 187 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
188}; 188};
189 189
190static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { 190static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
191 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ 191 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
192 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ 192 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
193 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */ 193 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
194 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */ 194 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
195}; 195};
196 196
197static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { 197static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
198 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */ 198 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
199 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */ 199 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
200 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */ 200 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
201 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/ 201 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
202}; 202};
203 203
204static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { 204static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
205 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ 205 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
206 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ 206 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
207 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */ 207 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
208 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ 208 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
209}; 209};
210 210
211static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { 211static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
212 {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */ 212 {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
213 {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */ 213 {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
214 {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */ 214 {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
215 {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */ 215 {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
216}; 216};
217 217
218static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { 218static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
219 {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */ 219 {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
220 {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */ 220 {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
221 {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */ 221 {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
@@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1111 struct iwl_scale_tbl_info *tbl) 1111 struct iwl_scale_tbl_info *tbl)
1112{ 1112{
1113 /* Used to choose among HT tables */ 1113 /* Used to choose among HT tables */
1114 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; 1114 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1115 1115
1116 /* Check for invalid LQ type */ 1116 /* Check for invalid LQ type */
1117 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { 1117 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1173 &(lq_sta->lq_info[lq_sta->active_tbl]); 1173 &(lq_sta->lq_info[lq_sta->active_tbl]);
1174 s32 active_sr = active_tbl->win[index].success_ratio; 1174 s32 active_sr = active_tbl->win[index].success_ratio;
1175 s32 active_tpt = active_tbl->expected_tpt[index]; 1175 s32 active_tpt = active_tbl->expected_tpt[index];
1176
1177 /* expected "search" throughput */ 1176 /* expected "search" throughput */
1178 s32 *tpt_tbl = tbl->expected_tpt; 1177 const u16 *tpt_tbl = tbl->expected_tpt;
1179 1178
1180 s32 new_rate, high, low, start_hi; 1179 s32 new_rate, high, low, start_hi;
1181 u16 high_low; 1180 u16 high_low;
@@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
3319 struct ieee80211_sta *sta, void *priv_sta) 3318 struct ieee80211_sta *sta, void *priv_sta)
3320{ 3319{
3321} 3320}
3322static struct rate_control_ops rs_ops = { 3321
3323 .module = NULL, 3322static const struct rate_control_ops rs_ops = {
3324 .name = RS_NAME, 3323 .name = RS_NAME,
3325 .tx_status = rs_tx_status, 3324 .tx_status = rs_tx_status,
3326 .get_rate = rs_get_rate, 3325 .get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index bdd5644a400b..f6bd25cad203 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
315 u8 is_dup; /* 1 = duplicated data streams */ 315 u8 is_dup; /* 1 = duplicated data streams */
316 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 316 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
317 u8 max_search; /* maximun number of tables we can search */ 317 u8 max_search; /* maximun number of tables we can search */
318 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 318 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
319 u32 current_rate; /* rate_n_flags, uCode API format */ 319 u32 current_rate; /* rate_n_flags, uCode API format */
320 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 320 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
321}; 321};
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 7a1bc1c547e1..cd8377346aff 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -39,7 +39,7 @@
39 39
40#define IWL_CMD_ENTRY(x) [x] = #x 40#define IWL_CMD_ENTRY(x) [x] = #x
41 41
42const char *iwl_dvm_cmd_strings[REPLY_MAX] = { 42const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
43 IWL_CMD_ENTRY(REPLY_ALIVE), 43 IWL_CMD_ENTRY(REPLY_ALIVE),
44 IWL_CMD_ENTRY(REPLY_ERROR), 44 IWL_CMD_ENTRY(REPLY_ERROR),
45 IWL_CMD_ENTRY(REPLY_ECHO), 45 IWL_CMD_ENTRY(REPLY_ECHO),
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 2a59da2ff87a..003a546571d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,8 +71,8 @@
71#define IWL3160_UCODE_API_MAX 8 71#define IWL3160_UCODE_API_MAX 8
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 7 74#define IWL7260_UCODE_API_OK 8
75#define IWL3160_UCODE_API_OK 7 75#define IWL3160_UCODE_API_OK 8
76 76
77/* Lowest firmware API version supported */ 77/* Lowest firmware API version supported */
78#define IWL7260_UCODE_API_MIN 7 78#define IWL7260_UCODE_API_MIN 7
@@ -95,6 +95,8 @@
95#define IWL7265_FW_PRE "iwlwifi-7265-" 95#define IWL7265_FW_PRE "iwlwifi-7265-"
96#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 96#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
97 97
98#define NVM_HW_SECTION_NUM_FAMILY_7000 0
99
98static const struct iwl_base_params iwl7000_base_params = { 100static const struct iwl_base_params iwl7000_base_params = {
99 .eeprom_size = OTP_LOW_IMAGE_SIZE, 101 .eeprom_size = OTP_LOW_IMAGE_SIZE,
100 .num_of_queues = IWLAGN_NUM_QUEUES, 102 .num_of_queues = IWLAGN_NUM_QUEUES,
@@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
120 .max_inst_size = IWL60_RTC_INST_SIZE, \ 122 .max_inst_size = IWL60_RTC_INST_SIZE, \
121 .max_data_size = IWL60_RTC_DATA_SIZE, \ 123 .max_data_size = IWL60_RTC_DATA_SIZE, \
122 .base_params = &iwl7000_base_params, \ 124 .base_params = &iwl7000_base_params, \
123 .led_mode = IWL_LED_RF_STATE 125 .led_mode = IWL_LED_RF_STATE, \
126 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
124 127
125 128
126const struct iwl_cfg iwl7260_2ac_cfg = { 129const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -131,6 +134,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
131 .nvm_ver = IWL7260_NVM_VERSION, 134 .nvm_ver = IWL7260_NVM_VERSION,
132 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 135 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
133 .host_interrupt_operation_mode = true, 136 .host_interrupt_operation_mode = true,
137 .lp_xtal_workaround = true,
134}; 138};
135 139
136const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { 140const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -142,6 +146,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
142 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 146 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
143 .high_temp = true, 147 .high_temp = true,
144 .host_interrupt_operation_mode = true, 148 .host_interrupt_operation_mode = true,
149 .lp_xtal_workaround = true,
145}; 150};
146 151
147const struct iwl_cfg iwl7260_2n_cfg = { 152const struct iwl_cfg iwl7260_2n_cfg = {
@@ -152,6 +157,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
152 .nvm_ver = IWL7260_NVM_VERSION, 157 .nvm_ver = IWL7260_NVM_VERSION,
153 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 158 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
154 .host_interrupt_operation_mode = true, 159 .host_interrupt_operation_mode = true,
160 .lp_xtal_workaround = true,
155}; 161};
156 162
157const struct iwl_cfg iwl7260_n_cfg = { 163const struct iwl_cfg iwl7260_n_cfg = {
@@ -162,6 +168,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
162 .nvm_ver = IWL7260_NVM_VERSION, 168 .nvm_ver = IWL7260_NVM_VERSION,
163 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 169 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
164 .host_interrupt_operation_mode = true, 170 .host_interrupt_operation_mode = true,
171 .lp_xtal_workaround = true,
165}; 172};
166 173
167const struct iwl_cfg iwl3160_2ac_cfg = { 174const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -194,6 +201,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
194 .host_interrupt_operation_mode = true, 201 .host_interrupt_operation_mode = true,
195}; 202};
196 203
204static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
205 {.pwr = 1600, .backoff = 0},
206 {.pwr = 1300, .backoff = 467},
207 {.pwr = 900, .backoff = 1900},
208 {.pwr = 800, .backoff = 2630},
209 {.pwr = 700, .backoff = 3720},
210 {.pwr = 600, .backoff = 5550},
211 {.pwr = 500, .backoff = 9350},
212 {0},
213};
214
197const struct iwl_cfg iwl7265_2ac_cfg = { 215const struct iwl_cfg iwl7265_2ac_cfg = {
198 .name = "Intel(R) Dual Band Wireless AC 7265", 216 .name = "Intel(R) Dual Band Wireless AC 7265",
199 .fw_name_pre = IWL7265_FW_PRE, 217 .fw_name_pre = IWL7265_FW_PRE,
@@ -201,6 +219,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
201 .ht_params = &iwl7000_ht_params, 219 .ht_params = &iwl7000_ht_params,
202 .nvm_ver = IWL7265_NVM_VERSION, 220 .nvm_ver = IWL7265_NVM_VERSION,
203 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 221 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
222 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
204}; 223};
205 224
206const struct iwl_cfg iwl7265_2n_cfg = { 225const struct iwl_cfg iwl7265_2n_cfg = {
@@ -210,6 +229,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
210 .ht_params = &iwl7000_ht_params, 229 .ht_params = &iwl7000_ht_params,
211 .nvm_ver = IWL7265_NVM_VERSION, 230 .nvm_ver = IWL7265_NVM_VERSION,
212 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 231 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
232 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
213}; 233};
214 234
215const struct iwl_cfg iwl7265_n_cfg = { 235const struct iwl_cfg iwl7265_n_cfg = {
@@ -219,6 +239,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
219 .ht_params = &iwl7000_ht_params, 239 .ht_params = &iwl7000_ht_params,
220 .nvm_ver = IWL7265_NVM_VERSION, 240 .nvm_ver = IWL7265_NVM_VERSION,
221 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 241 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
242 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
222}; 243};
223 244
224MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 245MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
new file mode 100644
index 000000000000..f5bd82b88592
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -0,0 +1,132 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/module.h>
65#include <linux/stringify.h>
66#include "iwl-config.h"
67#include "iwl-agn-hw.h"
68
69/* Highest firmware API version supported */
70#define IWL8000_UCODE_API_MAX 8
71
72/* Oldest version we won't warn about */
73#define IWL8000_UCODE_API_OK 8
74
75/* Lowest firmware API version supported */
76#define IWL8000_UCODE_API_MIN 8
77
78/* NVM versions */
79#define IWL8000_NVM_VERSION 0x0a1d
80#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
81
82#define IWL8000_FW_PRE "iwlwifi-8000-"
83#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
84
85#define NVM_HW_SECTION_NUM_FAMILY_8000 10
86
87static const struct iwl_base_params iwl8000_base_params = {
88 .eeprom_size = OTP_LOW_IMAGE_SIZE,
89 .num_of_queues = IWLAGN_NUM_QUEUES,
90 .pll_cfg_val = 0,
91 .shadow_ram_support = true,
92 .led_compensation = 57,
93 .wd_timeout = IWL_LONG_WD_TIMEOUT,
94 .max_event_log_size = 512,
95 .shadow_reg_enable = true,
96 .pcie_l1_allowed = true,
97};
98
99static const struct iwl_ht_params iwl8000_ht_params = {
100 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
101};
102
103#define IWL_DEVICE_8000 \
104 .ucode_api_max = IWL8000_UCODE_API_MAX, \
105 .ucode_api_ok = IWL8000_UCODE_API_OK, \
106 .ucode_api_min = IWL8000_UCODE_API_MIN, \
107 .device_family = IWL_DEVICE_FAMILY_8000, \
108 .max_inst_size = IWL60_RTC_INST_SIZE, \
109 .max_data_size = IWL60_RTC_DATA_SIZE, \
110 .base_params = &iwl8000_base_params, \
111 .led_mode = IWL_LED_RF_STATE, \
112 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
113
114const struct iwl_cfg iwl8260_2ac_cfg = {
115 .name = "Intel(R) Dual Band Wireless AC 8260",
116 .fw_name_pre = IWL8000_FW_PRE,
117 IWL_DEVICE_8000,
118 .ht_params = &iwl8000_ht_params,
119 .nvm_ver = IWL8000_NVM_VERSION,
120 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
121};
122
123const struct iwl_cfg iwl8260_n_cfg = {
124 .name = "Intel(R) Dual Band Wireless-AC 8260",
125 .fw_name_pre = IWL8000_FW_PRE,
126 IWL_DEVICE_8000,
127 .ht_params = &iwl8000_ht_params,
128 .nvm_ver = IWL8000_NVM_VERSION,
129 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
130};
131
132MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 1ced525157dc..3f17dc3f2c8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -84,6 +84,7 @@ enum iwl_device_family {
84 IWL_DEVICE_FAMILY_6050, 84 IWL_DEVICE_FAMILY_6050,
85 IWL_DEVICE_FAMILY_6150, 85 IWL_DEVICE_FAMILY_6150,
86 IWL_DEVICE_FAMILY_7000, 86 IWL_DEVICE_FAMILY_7000,
87 IWL_DEVICE_FAMILY_8000,
87}; 88};
88 89
89/* 90/*
@@ -192,6 +193,15 @@ struct iwl_eeprom_params {
192 bool enhanced_txpower; 193 bool enhanced_txpower;
193}; 194};
194 195
196/* Tx-backoff power threshold
197 * @pwr: The power limit in mw
198 * @backoff: The tx-backoff in uSec
199 */
200struct iwl_pwr_tx_backoff {
201 u32 pwr;
202 u32 backoff;
203};
204
195/** 205/**
196 * struct iwl_cfg 206 * struct iwl_cfg
197 * @name: Offical name of the device 207 * @name: Offical name of the device
@@ -217,6 +227,9 @@ struct iwl_eeprom_params {
217 * @high_temp: Is this NIC is designated to be in high temperature. 227 * @high_temp: Is this NIC is designated to be in high temperature.
218 * @host_interrupt_operation_mode: device needs host interrupt operation 228 * @host_interrupt_operation_mode: device needs host interrupt operation
219 * mode set 229 * mode set
230 * @d0i3: device uses d0i3 instead of d3
231 * @nvm_hw_section_num: the ID of the HW NVM section
232 * @pwr_tx_backoffs: translation table between power limits and backoffs
220 * 233 *
221 * We enable the driver to be backward compatible wrt. hardware features. 234 * We enable the driver to be backward compatible wrt. hardware features.
222 * API differences in uCode shouldn't be handled here but through TLVs 235 * API differences in uCode shouldn't be handled here but through TLVs
@@ -247,6 +260,10 @@ struct iwl_cfg {
247 const bool internal_wimax_coex; 260 const bool internal_wimax_coex;
248 const bool host_interrupt_operation_mode; 261 const bool host_interrupt_operation_mode;
249 bool high_temp; 262 bool high_temp;
263 bool d0i3;
264 u8 nvm_hw_section_num;
265 bool lp_xtal_workaround;
266 const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
250}; 267};
251 268
252/* 269/*
@@ -307,6 +324,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
307extern const struct iwl_cfg iwl7265_2ac_cfg; 324extern const struct iwl_cfg iwl7265_2ac_cfg;
308extern const struct iwl_cfg iwl7265_2n_cfg; 325extern const struct iwl_cfg iwl7265_2n_cfg;
309extern const struct iwl_cfg iwl7265_n_cfg; 326extern const struct iwl_cfg iwl7265_n_cfg;
327extern const struct iwl_cfg iwl8260_2ac_cfg;
328extern const struct iwl_cfg iwl8260_n_cfg;
310#endif /* CONFIG_IWLMVM */ 329#endif /* CONFIG_IWLMVM */
311 330
312#endif /* __IWL_CONFIG_H__ */ 331#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 9d325516c42d..fe129c94ae3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -139,6 +139,13 @@
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) 139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140 140
141/* 141/*
142 * CSR HW resources monitor registers
143 */
144#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214)
145#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228)
146#define CSR_MONITOR_XTAL_RESOURCES (0x00000010)
147
148/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev; 149 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only. 150 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register. 151 * See also CSR_HW_REV register.
@@ -173,6 +180,7 @@
173#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ 180#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
174#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ 181#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
175#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ 182#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
183#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
176 184
177#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ 185#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
178#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ 186#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -240,6 +248,7 @@
240 * 001 -- MAC power-down 248 * 001 -- MAC power-down
241 * 010 -- PHY (radio) power-down 249 * 010 -- PHY (radio) power-down
242 * 011 -- Error 250 * 011 -- Error
251 * 10: XTAL ON request
243 * 9-6: SYS_CONFIG 252 * 9-6: SYS_CONFIG
244 * Indicates current system configuration, reflecting pins on chip 253 * Indicates current system configuration, reflecting pins on chip
245 * as forced high/low by device circuit board. 254 * as forced high/low by device circuit board.
@@ -271,6 +280,7 @@
271#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) 280#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
272#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) 281#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
273#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) 282#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
283#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
274 284
275#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) 285#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
276 286
@@ -395,37 +405,33 @@
395#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) 405#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
396#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) 406#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
397 407
398/* SECURE boot registers */ 408/*
399#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100) 409 * SHR target access (Shared block memory space)
400enum secure_boot_config_reg { 410 *
401 CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001, 411 * Shared internal registers can be accessed directly from PCI bus through SHR
402 CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002, 412 * arbiter without need for the MAC HW to be powered up. This is possible due to
403}; 413 * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and
404 414 * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
405#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100) 415 *
406#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100) 416 * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW
407enum secure_boot_status_reg { 417 * need not be powered up so no "grab inc access" is required.
408 CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003, 418 */
409 CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
410 CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
411 CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
412 CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
413};
414
415#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
416enum secure_load_status_reg {
417 CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
418 CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
419 CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
420 CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
421};
422
423#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
424#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
425
426#define CSR_SECURE_TIME_OUT (100)
427 419
428#define FH_TCSR_0_REG0 (0x1D00) 420/*
421 * Registers for accessing shared registers (e.g. SHR_APMG_GP1,
422 * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
423 * first, write to the control register:
424 * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
425 * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
426 * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
427 *
428 * To write the register, first, write to the data register
429 * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
430 * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
431 * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
432 */
433#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec)
434#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4)
429 435
430/* 436/*
431 * HBUS (Host-side Bus) 437 * HBUS (Host-side Bus)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a75aac986a23..c8cbdbe15924 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -126,6 +126,7 @@ do { \
126/* 0x00000F00 - 0x00000100 */ 126/* 0x00000F00 - 0x00000100 */
127#define IWL_DL_POWER 0x00000100 127#define IWL_DL_POWER 0x00000100
128#define IWL_DL_TEMP 0x00000200 128#define IWL_DL_TEMP 0x00000200
129#define IWL_DL_RPM 0x00000400
129#define IWL_DL_SCAN 0x00000800 130#define IWL_DL_SCAN 0x00000800
130/* 0x0000F000 - 0x00001000 */ 131/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC 0x00001000 132#define IWL_DL_ASSOC 0x00001000
@@ -189,5 +190,6 @@ do { \
189#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) 190#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
190#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) 191#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
191#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) 192#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
193#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
192 194
193#endif 195#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 75103554cd63..0a3e841b44a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -128,7 +128,7 @@ struct iwl_drv {
128 const struct iwl_cfg *cfg; 128 const struct iwl_cfg *cfg;
129 129
130 int fw_index; /* firmware we're trying to load */ 130 int fw_index; /* firmware we're trying to load */
131 char firmware_name[25]; /* name of firmware file to load */ 131 char firmware_name[32]; /* name of firmware file to load */
132 132
133 struct completion request_firmware_complete; 133 struct completion request_firmware_complete;
134 134
@@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
237 return -ENOENT; 237 return -ENOENT;
238 } 238 }
239 239
240 sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); 240 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
241 name_pre, tag);
241 242
242 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 243 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
243 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) 244 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
@@ -403,6 +404,38 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
403 return 0; 404 return 0;
404} 405}
405 406
407static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
408 struct iwl_ucode_capabilities *capa)
409{
410 const struct iwl_ucode_api *ucode_api = (void *)data;
411 u32 api_index = le32_to_cpu(ucode_api->api_index);
412
413 if (api_index >= IWL_API_ARRAY_SIZE) {
414 IWL_ERR(drv, "api_index larger than supported by driver\n");
415 return -EINVAL;
416 }
417
418 capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
419
420 return 0;
421}
422
423static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
424 struct iwl_ucode_capabilities *capa)
425{
426 const struct iwl_ucode_capa *ucode_capa = (void *)data;
427 u32 api_index = le32_to_cpu(ucode_capa->api_index);
428
429 if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
430 IWL_ERR(drv, "api_index larger than supported by driver\n");
431 return -EINVAL;
432 }
433
434 capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
435
436 return 0;
437}
438
406static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, 439static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
407 const struct firmware *ucode_raw, 440 const struct firmware *ucode_raw,
408 struct iwl_firmware_pieces *pieces) 441 struct iwl_firmware_pieces *pieces)
@@ -637,6 +670,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
637 */ 670 */
638 capa->flags = le32_to_cpup((__le32 *)tlv_data); 671 capa->flags = le32_to_cpup((__le32 *)tlv_data);
639 break; 672 break;
673 case IWL_UCODE_TLV_API_CHANGES_SET:
674 if (tlv_len != sizeof(struct iwl_ucode_api))
675 goto invalid_tlv_len;
676 if (iwl_set_ucode_api_flags(drv, tlv_data, capa))
677 goto tlv_error;
678 break;
679 case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
680 if (tlv_len != sizeof(struct iwl_ucode_capa))
681 goto invalid_tlv_len;
682 if (iwl_set_ucode_capabilities(drv, tlv_data, capa))
683 goto tlv_error;
684 break;
640 case IWL_UCODE_TLV_INIT_EVTLOG_PTR: 685 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
641 if (tlv_len != sizeof(u32)) 686 if (tlv_len != sizeof(u32))
642 goto invalid_tlv_len; 687 goto invalid_tlv_len;
@@ -727,6 +772,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
727 if (tlv_len != sizeof(u32)) 772 if (tlv_len != sizeof(u32))
728 goto invalid_tlv_len; 773 goto invalid_tlv_len;
729 drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data); 774 drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
775 drv->fw.valid_tx_ant = (drv->fw.phy_config &
776 FW_PHY_CFG_TX_CHAIN) >>
777 FW_PHY_CFG_TX_CHAIN_POS;
778 drv->fw.valid_rx_ant = (drv->fw.phy_config &
779 FW_PHY_CFG_RX_CHAIN) >>
780 FW_PHY_CFG_RX_CHAIN_POS;
730 break; 781 break;
731 case IWL_UCODE_TLV_SECURE_SEC_RT: 782 case IWL_UCODE_TLV_SECURE_SEC_RT:
732 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, 783 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -1300,8 +1351,7 @@ MODULE_PARM_DESC(antenna_coupling,
1300 1351
1301module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO); 1352module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
1302MODULE_PARM_DESC(wd_disable, 1353MODULE_PARM_DESC(wd_disable,
1303 "Disable stuck queue watchdog timer 0=system default, " 1354 "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
1304 "1=disable, 2=enable (default: 0)");
1305 1355
1306module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); 1356module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
1307MODULE_PARM_DESC(nvm_file, "NVM file name"); 1357MODULE_PARM_DESC(nvm_file, "NVM file name");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 592c01e11013..3c72cb710b0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -70,6 +70,20 @@
70#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation" 70#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
71#define DRV_AUTHOR "<ilw@linux.intel.com>" 71#define DRV_AUTHOR "<ilw@linux.intel.com>"
72 72
73/* radio config bits (actual values from NVM definition) */
74#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
75#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
76#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
77#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
78#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
79#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
80
81#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF)
82#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF)
83#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF)
84#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF)
85#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
86#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
73 87
74/** 88/**
75 * DOC: Driver system flows - drv component 89 * DOC: Driver system flows - drv component
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index e3c7deafabe6..f0548b8a64b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -81,16 +81,17 @@ struct iwl_nvm_data {
81 bool sku_cap_band_24GHz_enable; 81 bool sku_cap_band_24GHz_enable;
82 bool sku_cap_band_52GHz_enable; 82 bool sku_cap_band_52GHz_enable;
83 bool sku_cap_11n_enable; 83 bool sku_cap_11n_enable;
84 bool sku_cap_11ac_enable;
84 bool sku_cap_amt_enable; 85 bool sku_cap_amt_enable;
85 bool sku_cap_ipan_enable; 86 bool sku_cap_ipan_enable;
86 87
87 u8 radio_cfg_type; 88 u16 radio_cfg_type;
88 u8 radio_cfg_step; 89 u8 radio_cfg_step;
89 u8 radio_cfg_dash; 90 u8 radio_cfg_dash;
90 u8 radio_cfg_pnum; 91 u8 radio_cfg_pnum;
91 u8 valid_tx_ant, valid_rx_ant; 92 u8 valid_tx_ant, valid_rx_ant;
92 93
93 u16 nvm_version; 94 u32 nvm_version;
94 s8 max_tx_pwr_half_dbm; 95 s8 max_tx_pwr_half_dbm;
95 96
96 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 97 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 88e2d6eb569f..b45e576a4b57 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -126,6 +126,8 @@ enum iwl_ucode_tlv_type {
126 IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, 126 IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
127 IWL_UCODE_TLV_NUM_OF_CPU = 27, 127 IWL_UCODE_TLV_NUM_OF_CPU = 27,
128 IWL_UCODE_TLV_CSCHEME = 28, 128 IWL_UCODE_TLV_CSCHEME = 28,
129 IWL_UCODE_TLV_API_CHANGES_SET = 29,
130 IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
129}; 131};
130 132
131struct iwl_ucode_tlv { 133struct iwl_ucode_tlv {
@@ -158,4 +160,19 @@ struct iwl_tlv_ucode_header {
158 u8 data[0]; 160 u8 data[0];
159}; 161};
160 162
163/*
164 * ucode TLVs
165 *
166 * ability to get extensions for flags & capabilities from ucode binary files
167 */
168struct iwl_ucode_api {
169 __le32 api_index;
170 __le32 api_flags;
171} __packed;
172
173struct iwl_ucode_capa {
174 __le32 api_index;
175 __le32 api_capa;
176} __packed;
177
161#endif /* __iwl_fw_file_h__ */ 178#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 5f1493c44097..d14f19339d61 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -92,9 +92,11 @@
92 * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API 92 * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
93 * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command 93 * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
94 * containing CAM (Continuous Active Mode) indication. 94 * containing CAM (Continuous Active Mode) indication.
95 * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a 95 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
96 * single bound interface). 96 * P2P client interfaces simultaneously if they are in different bindings.
97 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save 97 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
98 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
99 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
98 */ 100 */
99enum iwl_ucode_tlv_flag { 101enum iwl_ucode_tlv_flag {
100 IWL_UCODE_TLV_FLAGS_PAN = BIT(0), 102 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -116,9 +118,27 @@ enum iwl_ucode_tlv_flag {
116 IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17), 118 IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
117 IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19), 119 IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
118 IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20), 120 IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
119 IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21), 121 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
120 IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), 122 IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
121 IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), 123 IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
124 IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
125 IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
126};
127
128/**
129 * enum iwl_ucode_tlv_api - ucode api
130 * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
131 */
132enum iwl_ucode_tlv_api {
133 IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
134};
135
136/**
137 * enum iwl_ucode_tlv_capa - ucode capabilities
138 * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
139 */
140enum iwl_ucode_tlv_capa {
141 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
122}; 142};
123 143
124/* The default calibrate table size if not specified by firmware file */ 144/* The default calibrate table size if not specified by firmware file */
@@ -160,13 +180,16 @@ enum iwl_ucode_sec {
160 * For 16.0 uCode and above, there is no differentiation between sections, 180 * For 16.0 uCode and above, there is no differentiation between sections,
161 * just an offset to the HW address. 181 * just an offset to the HW address.
162 */ 182 */
163#define IWL_UCODE_SECTION_MAX 6 183#define IWL_UCODE_SECTION_MAX 12
164#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2) 184#define IWL_API_ARRAY_SIZE 1
185#define IWL_CAPABILITIES_ARRAY_SIZE 1
165 186
166struct iwl_ucode_capabilities { 187struct iwl_ucode_capabilities {
167 u32 max_probe_length; 188 u32 max_probe_length;
168 u32 standard_phy_calibration_size; 189 u32 standard_phy_calibration_size;
169 u32 flags; 190 u32 flags;
191 u32 api[IWL_API_ARRAY_SIZE];
192 u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
170}; 193};
171 194
172/* one for each uCode image (inst/data, init/runtime/wowlan) */ 195/* one for each uCode image (inst/data, init/runtime/wowlan) */
@@ -285,22 +308,12 @@ struct iwl_fw {
285 308
286 struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; 309 struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
287 u32 phy_config; 310 u32 phy_config;
311 u8 valid_tx_ant;
312 u8 valid_rx_ant;
288 313
289 bool mvm_fw; 314 bool mvm_fw;
290 315
291 struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; 316 struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
292}; 317};
293 318
294static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
295{
296 return (fw->phy_config & FW_PHY_CFG_TX_CHAIN) >>
297 FW_PHY_CFG_TX_CHAIN_POS;
298}
299
300static inline u8 iwl_fw_valid_rx_ant(const struct iwl_fw *fw)
301{
302 return (fw->phy_config & FW_PHY_CFG_RX_CHAIN) >>
303 FW_PHY_CFG_RX_CHAIN_POS;
304}
305
306#endif /* __iwl_fw_h__ */ 319#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index f98175a0d35b..44cc3cf45762 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -93,14 +93,14 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
93} 93}
94IWL_EXPORT_SYMBOL(iwl_poll_direct_bit); 94IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
95 95
96static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) 96u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
97{ 97{
98 u32 val = iwl_trans_read_prph(trans, ofs); 98 u32 val = iwl_trans_read_prph(trans, ofs);
99 trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); 99 trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
100 return val; 100 return val;
101} 101}
102 102
103static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) 103void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
104{ 104{
105 trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); 105 trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
106 iwl_trans_write_prph(trans, ofs, val); 106 iwl_trans_write_prph(trans, ofs, val);
@@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
130} 130}
131IWL_EXPORT_SYMBOL(iwl_write_prph); 131IWL_EXPORT_SYMBOL(iwl_write_prph);
132 132
133int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
134 u32 bits, u32 mask, int timeout)
135{
136 int t = 0;
137
138 do {
139 if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
140 return t;
141 udelay(IWL_POLL_INTERVAL);
142 t += IWL_POLL_INTERVAL;
143 } while (t < timeout);
144
145 return -ETIMEDOUT;
146}
147
133void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 148void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
134{ 149{
135 unsigned long flags; 150 unsigned long flags;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index c339c1bed080..665ddd9dbbc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -70,8 +70,12 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
70void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); 70void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
71 71
72 72
73u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
73u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); 74u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
75void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
74void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); 76void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
77int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
78 u32 bits, u32 mask, int timeout);
75void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); 79void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
76void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, 80void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
77 u32 bits, u32 mask); 81 u32 bits, u32 mask);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index b29075c3da8e..d994317db85b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -96,7 +96,7 @@ enum iwl_disable_11n {
96 * use IWL_[DIS,EN]ABLE_HT_* constants 96 * use IWL_[DIS,EN]ABLE_HT_* constants
97 * @amsdu_size_8K: enable 8K amsdu size, default = 0 97 * @amsdu_size_8K: enable 8K amsdu size, default = 0
98 * @restart_fw: restart firmware, default = 1 98 * @restart_fw: restart firmware, default = 1
99 * @wd_disable: enable stuck queue check, default = 0 99 * @wd_disable: disable stuck queue check, default = 1
100 * @bt_coex_active: enable bt coex, default = true 100 * @bt_coex_active: enable bt coex, default = true
101 * @led_mode: system default, default = 0 101 * @led_mode: system default, default = 0
102 * @power_save: disable power save, default = false 102 * @power_save: disable power save, default = false
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 725e954d8475..6be30c698506 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -71,7 +71,7 @@ enum wkp_nvm_offsets {
71 /* NVM HW-Section offset (in words) definitions */ 71 /* NVM HW-Section offset (in words) definitions */
72 HW_ADDR = 0x15, 72 HW_ADDR = 0x15,
73 73
74/* NVM SW-Section offset (in words) definitions */ 74 /* NVM SW-Section offset (in words) definitions */
75 NVM_SW_SECTION = 0x1C0, 75 NVM_SW_SECTION = 0x1C0,
76 NVM_VERSION = 0, 76 NVM_VERSION = 0,
77 RADIO_CFG = 1, 77 RADIO_CFG = 1,
@@ -79,11 +79,32 @@ enum wkp_nvm_offsets {
79 N_HW_ADDRS = 3, 79 N_HW_ADDRS = 3,
80 NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, 80 NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
81 81
82/* NVM calibration section offset (in words) definitions */ 82 /* NVM calibration section offset (in words) definitions */
83 NVM_CALIB_SECTION = 0x2B8, 83 NVM_CALIB_SECTION = 0x2B8,
84 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION 84 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
85}; 85};
86 86
87enum family_8000_nvm_offsets {
88 /* NVM HW-Section offset (in words) definitions */
89 HW_ADDR0_FAMILY_8000 = 0x12,
90 HW_ADDR1_FAMILY_8000 = 0x16,
91 MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
92
93 /* NVM SW-Section offset (in words) definitions */
94 NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
95 NVM_VERSION_FAMILY_8000 = 0,
96 RADIO_CFG_FAMILY_8000 = 2,
97 SKU_FAMILY_8000 = 4,
98 N_HW_ADDRS_FAMILY_8000 = 5,
99
100 /* NVM REGULATORY-Section offset (in words) definitions */
101 NVM_CHANNELS_FAMILY_8000 = 0,
102
103 /* NVM calibration section offset (in words) definitions */
104 NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
105 XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
106};
107
87/* SKU Capabilities (actual values from NVM definition) */ 108/* SKU Capabilities (actual values from NVM definition) */
88enum nvm_sku_bits { 109enum nvm_sku_bits {
89 NVM_SKU_CAP_BAND_24GHZ = BIT(0), 110 NVM_SKU_CAP_BAND_24GHZ = BIT(0),
@@ -92,14 +113,6 @@ enum nvm_sku_bits {
92 NVM_SKU_CAP_11AC_ENABLE = BIT(3), 113 NVM_SKU_CAP_11AC_ENABLE = BIT(3),
93}; 114};
94 115
95/* radio config bits (actual values from NVM definition) */
96#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
97#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
98#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
99#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
100#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
101#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
102
103/* 116/*
104 * These are the channel numbers in the order that they are stored in the NVM 117 * These are the channel numbers in the order that they are stored in the NVM
105 */ 118 */
@@ -112,7 +125,17 @@ static const u8 iwl_nvm_channels[] = {
112 149, 153, 157, 161, 165 125 149, 153, 157, 161, 165
113}; 126};
114 127
128static const u8 iwl_nvm_channels_family_8000[] = {
129 /* 2.4 GHz */
130 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
131 /* 5 GHz */
132 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
133 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
134 149, 153, 157, 161, 165, 169, 173, 177, 181
135};
136
115#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) 137#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
138#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
116#define NUM_2GHZ_CHANNELS 14 139#define NUM_2GHZ_CHANNELS 14
117#define FIRST_2GHZ_HT_MINUS 5 140#define FIRST_2GHZ_HT_MINUS 5
118#define LAST_2GHZ_HT_PLUS 9 141#define LAST_2GHZ_HT_PLUS 9
@@ -179,8 +202,18 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
179 struct ieee80211_channel *channel; 202 struct ieee80211_channel *channel;
180 u16 ch_flags; 203 u16 ch_flags;
181 bool is_5ghz; 204 bool is_5ghz;
205 int num_of_ch;
206 const u8 *nvm_chan;
207
208 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
209 num_of_ch = IWL_NUM_CHANNELS;
210 nvm_chan = &iwl_nvm_channels[0];
211 } else {
212 num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
213 nvm_chan = &iwl_nvm_channels_family_8000[0];
214 }
182 215
183 for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) { 216 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
184 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); 217 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
185 218
186 if (ch_idx >= NUM_2GHZ_CHANNELS && 219 if (ch_idx >= NUM_2GHZ_CHANNELS &&
@@ -190,7 +223,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
190 if (!(ch_flags & NVM_CHANNEL_VALID)) { 223 if (!(ch_flags & NVM_CHANNEL_VALID)) {
191 IWL_DEBUG_EEPROM(dev, 224 IWL_DEBUG_EEPROM(dev,
192 "Ch. %d Flags %x [%sGHz] - No traffic\n", 225 "Ch. %d Flags %x [%sGHz] - No traffic\n",
193 iwl_nvm_channels[ch_idx], 226 nvm_chan[ch_idx],
194 ch_flags, 227 ch_flags,
195 (ch_idx >= NUM_2GHZ_CHANNELS) ? 228 (ch_idx >= NUM_2GHZ_CHANNELS) ?
196 "5.2" : "2.4"); 229 "5.2" : "2.4");
@@ -200,7 +233,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
200 channel = &data->channels[n_channels]; 233 channel = &data->channels[n_channels];
201 n_channels++; 234 n_channels++;
202 235
203 channel->hw_value = iwl_nvm_channels[ch_idx]; 236 channel->hw_value = nvm_chan[ch_idx];
204 channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ? 237 channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
205 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 238 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
206 channel->center_freq = 239 channel->center_freq =
@@ -211,11 +244,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
211 channel->flags = IEEE80211_CHAN_NO_HT40; 244 channel->flags = IEEE80211_CHAN_NO_HT40;
212 if (ch_idx < NUM_2GHZ_CHANNELS && 245 if (ch_idx < NUM_2GHZ_CHANNELS &&
213 (ch_flags & NVM_CHANNEL_40MHZ)) { 246 (ch_flags & NVM_CHANNEL_40MHZ)) {
214 if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS) 247 if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
215 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 248 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
216 if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS) 249 if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
217 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 250 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
218 } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT && 251 } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
219 (ch_flags & NVM_CHANNEL_40MHZ)) { 252 (ch_flags & NVM_CHANNEL_40MHZ)) {
220 if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) 253 if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
221 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 254 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -266,9 +299,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
266 299
267static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, 300static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
268 struct iwl_nvm_data *data, 301 struct iwl_nvm_data *data,
269 struct ieee80211_sta_vht_cap *vht_cap) 302 struct ieee80211_sta_vht_cap *vht_cap,
303 u8 tx_chains, u8 rx_chains)
270{ 304{
271 int num_ants = num_of_ant(data->valid_rx_ant); 305 int num_rx_ants = num_of_ant(rx_chains);
306 int num_tx_ants = num_of_ant(tx_chains);
272 307
273 vht_cap->vht_supported = true; 308 vht_cap->vht_supported = true;
274 309
@@ -278,8 +313,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
278 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | 313 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
279 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 314 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
280 315
281 if (num_ants > 1) 316 if (num_tx_ants > 1)
282 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; 317 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
318 else
319 vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
283 320
284 if (iwlwifi_mod_params.amsdu_size_8K) 321 if (iwlwifi_mod_params.amsdu_size_8K)
285 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; 322 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -294,10 +331,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
294 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | 331 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
295 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); 332 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
296 333
297 if (num_ants == 1 || 334 if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
298 cfg->rx_with_siso_diversity) { 335 vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
299 vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
300 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
301 /* this works because NOT_SUPPORTED == 3 */ 336 /* this works because NOT_SUPPORTED == 3 */
302 vht_cap->vht_mcs.rx_mcs_map |= 337 vht_cap->vht_mcs.rx_mcs_map |=
303 cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2); 338 cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
@@ -307,14 +342,23 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
307} 342}
308 343
309static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 344static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
310 struct iwl_nvm_data *data, const __le16 *nvm_sw, 345 struct iwl_nvm_data *data,
311 bool enable_vht, u8 tx_chains, u8 rx_chains) 346 const __le16 *ch_section, bool enable_vht,
347 u8 tx_chains, u8 rx_chains)
312{ 348{
313 int n_channels = iwl_init_channel_map(dev, cfg, data, 349 int n_channels;
314 &nvm_sw[NVM_CHANNELS]);
315 int n_used = 0; 350 int n_used = 0;
316 struct ieee80211_supported_band *sband; 351 struct ieee80211_supported_band *sband;
317 352
353 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
354 n_channels = iwl_init_channel_map(
355 dev, cfg, data,
356 &ch_section[NVM_CHANNELS]);
357 else
358 n_channels = iwl_init_channel_map(
359 dev, cfg, data,
360 &ch_section[NVM_CHANNELS_FAMILY_8000]);
361
318 sband = &data->bands[IEEE80211_BAND_2GHZ]; 362 sband = &data->bands[IEEE80211_BAND_2GHZ];
319 sband->band = IEEE80211_BAND_2GHZ; 363 sband->band = IEEE80211_BAND_2GHZ;
320 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; 364 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -333,80 +377,160 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
333 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, 377 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
334 tx_chains, rx_chains); 378 tx_chains, rx_chains);
335 if (enable_vht) 379 if (enable_vht)
336 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap); 380 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
381 tx_chains, rx_chains);
337 382
338 if (n_channels != n_used) 383 if (n_channels != n_used)
339 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", 384 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
340 n_used, n_channels); 385 n_used, n_channels);
341} 386}
342 387
388static int iwl_get_sku(const struct iwl_cfg *cfg,
389 const __le16 *nvm_sw)
390{
391 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
392 return le16_to_cpup(nvm_sw + SKU);
393 else
394 return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
395}
396
397static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
398 const __le16 *nvm_sw)
399{
400 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
401 return le16_to_cpup(nvm_sw + NVM_VERSION);
402 else
403 return le32_to_cpup((__le32 *)(nvm_sw +
404 NVM_VERSION_FAMILY_8000));
405}
406
407static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
408 const __le16 *nvm_sw)
409{
410 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
411 return le16_to_cpup(nvm_sw + RADIO_CFG);
412 else
413 return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
414}
415
416#define N_HW_ADDRS_MASK_FAMILY_8000 0xF
417static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
418 const __le16 *nvm_sw)
419{
420 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
421 return le16_to_cpup(nvm_sw + N_HW_ADDRS);
422 else
423 return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
424 & N_HW_ADDRS_MASK_FAMILY_8000;
425}
426
427static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
428 struct iwl_nvm_data *data,
429 u32 radio_cfg)
430{
431 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
432 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
433 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
434 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
435 data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
436 return;
437 }
438
439 /* set the radio configuration for family 8000 */
440 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
441 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
442 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
443 data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
444}
445
446static void iwl_set_hw_address(const struct iwl_cfg *cfg,
447 struct iwl_nvm_data *data,
448 const __le16 *nvm_sec)
449{
450 u8 hw_addr[ETH_ALEN];
451
452 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
453 memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
454 else
455 memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
456 ETH_ALEN);
457
458 /* The byte order is little endian 16 bit, meaning 214365 */
459 data->hw_addr[0] = hw_addr[1];
460 data->hw_addr[1] = hw_addr[0];
461 data->hw_addr[2] = hw_addr[3];
462 data->hw_addr[3] = hw_addr[2];
463 data->hw_addr[4] = hw_addr[5];
464 data->hw_addr[5] = hw_addr[4];
465}
466
343struct iwl_nvm_data * 467struct iwl_nvm_data *
344iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 468iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
345 const __le16 *nvm_hw, const __le16 *nvm_sw, 469 const __le16 *nvm_hw, const __le16 *nvm_sw,
346 const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains) 470 const __le16 *nvm_calib, const __le16 *regulatory,
471 const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
347{ 472{
348 struct iwl_nvm_data *data; 473 struct iwl_nvm_data *data;
349 u8 hw_addr[ETH_ALEN]; 474 u32 sku;
350 u16 radio_cfg, sku; 475 u32 radio_cfg;
351 476
352 data = kzalloc(sizeof(*data) + 477 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
353 sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, 478 data = kzalloc(sizeof(*data) +
354 GFP_KERNEL); 479 sizeof(struct ieee80211_channel) *
480 IWL_NUM_CHANNELS,
481 GFP_KERNEL);
482 else
483 data = kzalloc(sizeof(*data) +
484 sizeof(struct ieee80211_channel) *
485 IWL_NUM_CHANNELS_FAMILY_8000,
486 GFP_KERNEL);
355 if (!data) 487 if (!data)
356 return NULL; 488 return NULL;
357 489
358 data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION); 490 data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
359 491
360 radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG); 492 radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
361 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); 493 iwl_set_radio_cfg(cfg, data, radio_cfg);
362 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
363 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
364 data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
365 data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
366 data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
367 494
368 sku = le16_to_cpup(nvm_sw + SKU); 495 sku = iwl_get_sku(cfg, nvm_sw);
369 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; 496 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
370 data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; 497 data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
371 data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; 498 data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
499 data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
372 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) 500 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
373 data->sku_cap_11n_enable = false; 501 data->sku_cap_11n_enable = false;
374 502
375 /* check overrides (some devices have wrong NVM) */ 503 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
376 if (cfg->valid_tx_ant)
377 data->valid_tx_ant = cfg->valid_tx_ant;
378 if (cfg->valid_rx_ant)
379 data->valid_rx_ant = cfg->valid_rx_ant;
380 504
381 if (!data->valid_tx_ant || !data->valid_rx_ant) { 505 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
382 IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", 506 /* Checking for required sections */
383 data->valid_tx_ant, data->valid_rx_ant); 507 if (!nvm_calib) {
384 kfree(data); 508 IWL_ERR_DEV(dev,
385 return NULL; 509 "Can't parse empty Calib NVM sections\n");
510 kfree(data);
511 return NULL;
512 }
513 /* in family 8000 Xtal calibration values moved to OTP */
514 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
515 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
386 } 516 }
387 517
388 data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS); 518 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
519 iwl_set_hw_address(cfg, data, nvm_hw);
389 520
390 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); 521 iwl_init_sbands(dev, cfg, data, nvm_sw,
391 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); 522 sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
523 rx_chains);
524 } else {
525 /* MAC address in family 8000 */
526 iwl_set_hw_address(cfg, data, mac_override);
392 527
393 /* The byte order is little endian 16 bit, meaning 214365 */ 528 iwl_init_sbands(dev, cfg, data, regulatory,
394 memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN); 529 sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
395 data->hw_addr[0] = hw_addr[1]; 530 rx_chains);
396 data->hw_addr[1] = hw_addr[0]; 531 }
397 data->hw_addr[2] = hw_addr[3];
398 data->hw_addr[3] = hw_addr[2];
399 data->hw_addr[4] = hw_addr[5];
400 data->hw_addr[5] = hw_addr[4];
401
402 iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
403 tx_chains, rx_chains);
404 532
405 data->calib_version = 255; /* TODO: 533 data->calib_version = 255;
406 this value will prevent some checks from
407 failing, we need to check if this
408 field is still needed, and if it does,
409 where is it in the NVM*/
410 534
411 return data; 535 return data;
412} 536}
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 0c4399aba8c6..c9c45a39d212 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -75,6 +75,7 @@
75struct iwl_nvm_data * 75struct iwl_nvm_data *
76iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 76iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
77 const __le16 *nvm_hw, const __le16 *nvm_sw, 77 const __le16 *nvm_hw, const __le16 *nvm_sw,
78 const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains); 78 const __le16 *nvm_calib, const __le16 *regulatory,
79 const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
79 80
80#endif /* __iwl_nvm_parse_h__ */ 81#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b5be51f3cd3d..ea29504ac617 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -119,7 +119,8 @@ struct iwl_cfg;
119 * @queue_not_full: notifies that a HW queue is not full any more. 119 * @queue_not_full: notifies that a HW queue is not full any more.
120 * Must be atomic and called with BH disabled. 120 * Must be atomic and called with BH disabled.
121 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that 121 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
122 * the radio is killed. May sleep. 122 * the radio is killed. Return %true if the device should be stopped by
123 * the transport immediately after the call. May sleep.
123 * @free_skb: allows the transport layer to free skbs that haven't been 124 * @free_skb: allows the transport layer to free skbs that haven't been
124 * reclaimed by the op_mode. This can happen when the driver is freed and 125 * reclaimed by the op_mode. This can happen when the driver is freed and
125 * there are Tx packets pending in the transport layer. 126 * there are Tx packets pending in the transport layer.
@@ -131,6 +132,8 @@ struct iwl_cfg;
131 * @nic_config: configure NIC, called before firmware is started. 132 * @nic_config: configure NIC, called before firmware is started.
132 * May sleep 133 * May sleep
133 * @wimax_active: invoked when WiMax becomes active. May sleep 134 * @wimax_active: invoked when WiMax becomes active. May sleep
135 * @enter_d0i3: configure the fw to enter d0i3. May sleep.
136 * @exit_d0i3: configure the fw to exit d0i3. May sleep.
134 */ 137 */
135struct iwl_op_mode_ops { 138struct iwl_op_mode_ops {
136 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 139 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -142,12 +145,14 @@ struct iwl_op_mode_ops {
142 struct iwl_device_cmd *cmd); 145 struct iwl_device_cmd *cmd);
143 void (*queue_full)(struct iwl_op_mode *op_mode, int queue); 146 void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
144 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); 147 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
145 void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); 148 bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
146 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); 149 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
147 void (*nic_error)(struct iwl_op_mode *op_mode); 150 void (*nic_error)(struct iwl_op_mode *op_mode);
148 void (*cmd_queue_full)(struct iwl_op_mode *op_mode); 151 void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
149 void (*nic_config)(struct iwl_op_mode *op_mode); 152 void (*nic_config)(struct iwl_op_mode *op_mode);
150 void (*wimax_active)(struct iwl_op_mode *op_mode); 153 void (*wimax_active)(struct iwl_op_mode *op_mode);
154 int (*enter_d0i3)(struct iwl_op_mode *op_mode);
155 int (*exit_d0i3)(struct iwl_op_mode *op_mode);
151}; 156};
152 157
153int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops); 158int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -155,7 +160,7 @@ void iwl_opmode_deregister(const char *name);
155 160
156/** 161/**
157 * struct iwl_op_mode - operational mode 162 * struct iwl_op_mode - operational mode
158 * @ops - pointer to its own ops 163 * @ops: pointer to its own ops
159 * 164 *
160 * This holds an implementation of the mac80211 / fw API. 165 * This holds an implementation of the mac80211 / fw API.
161 */ 166 */
@@ -191,11 +196,11 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
191 op_mode->ops->queue_not_full(op_mode, queue); 196 op_mode->ops->queue_not_full(op_mode, queue);
192} 197}
193 198
194static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, 199static inline bool __must_check
195 bool state) 200iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
196{ 201{
197 might_sleep(); 202 might_sleep();
198 op_mode->ops->hw_rf_kill(op_mode, state); 203 return op_mode->ops->hw_rf_kill(op_mode, state);
199} 204}
200 205
201static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, 206static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
@@ -226,4 +231,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
226 op_mode->ops->wimax_active(op_mode); 231 op_mode->ops->wimax_active(op_mode);
227} 232}
228 233
234static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
235{
236 might_sleep();
237
238 if (!op_mode->ops->enter_d0i3)
239 return 0;
240 return op_mode->ops->enter_d0i3(op_mode);
241}
242
243static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
244{
245 might_sleep();
246
247 if (!op_mode->ops->exit_d0i3)
248 return 0;
249 return op_mode->ops->exit_d0i3(op_mode);
250}
251
229#endif /* __iwl_op_mode_h__ */ 252#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index fa77d63a277a..b761ac4822a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -72,7 +72,7 @@
72#include "iwl-trans.h" 72#include "iwl-trans.h"
73 73
74#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ 74#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
75#define IWL_NUM_PAPD_CH_GROUPS 4 75#define IWL_NUM_PAPD_CH_GROUPS 7
76#define IWL_NUM_TXP_CH_GROUPS 9 76#define IWL_NUM_TXP_CH_GROUPS 9
77 77
78struct iwl_phy_db_entry { 78struct iwl_phy_db_entry {
@@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
383 if (!entry) 383 if (!entry)
384 return -EINVAL; 384 return -EINVAL;
385 385
386 if (WARN_ON_ONCE(!entry->size)) 386 if (!entry->size)
387 continue; 387 continue;
388 388
389 /* Send the requested PHY DB section */ 389 /* Send the requested PHY DB section */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 100bd0d79681..5f657c501406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -95,7 +95,8 @@
95#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ 95#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
96#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) 96#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
97 97
98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 98#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
99 100
100#define APMG_RTC_INT_STT_RFKILL (0x10000000) 101#define APMG_RTC_INT_STT_RFKILL (0x10000000)
101 102
@@ -105,6 +106,33 @@
105/* Device NMI register */ 106/* Device NMI register */
106#define DEVICE_SET_NMI_REG 0x00a01c30 107#define DEVICE_SET_NMI_REG 0x00a01c30
107 108
109/* Shared registers (0x0..0x3ff, via target indirect or periphery */
110#define SHR_BASE 0x00a10000
111
112/* Shared GP1 register */
113#define SHR_APMG_GP1_REG 0x01dc
114#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG)
115#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004
116#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000
117
118/* Shared DL_CFG register */
119#define SHR_APMG_DL_CFG_REG 0x01c4
120#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG)
121#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0
122#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
123#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100
124
125/* Shared APMG_XTAL_CFG register */
126#define SHR_APMG_XTAL_CFG_REG 0x1c0
127#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000
128
129/*
130 * Device reset for family 8000
131 * write to bit 24 in order to reset the CPU
132*/
133#define RELEASE_CPU_RESET (0x300C)
134#define RELEASE_CPU_RESET_BIT BIT(24)
135
108/***************************************************************************** 136/*****************************************************************************
109 * 7000/3000 series SHR DTS addresses * 137 * 7000/3000 series SHR DTS addresses *
110 *****************************************************************************/ 138 *****************************************************************************/
@@ -281,4 +309,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
281#define OSC_CLK (0xa04068) 309#define OSC_CLK (0xa04068)
282#define OSC_CLK_FORCE_CONTROL (0x8) 310#define OSC_CLK_FORCE_CONTROL (0x8)
283 311
312/* SECURE boot registers */
313#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
314enum secure_boot_config_reg {
315 LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
316 LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
317};
318
319#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
320#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
321enum secure_boot_status_reg {
322 LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
323 LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
324 LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
325 LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
326 LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
327 LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
328};
329
330#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
331enum secure_load_status_reg {
332 LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
333 LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
334 LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
335 LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
336 LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
337};
338
339#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
340#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
341#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
342#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
343
344#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
345#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
346#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
347#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
348
349#define LMPM_SECURE_TIME_OUT (100)
350
284#endif /* __iwl_prph_h__ */ 351#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 1f065cf4a4ba..8cdb0dd618a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
193 * @CMD_ASYNC: Return right away and don't wait for the response 193 * @CMD_ASYNC: Return right away and don't wait for the response
194 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 194 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
195 * response. The caller needs to call iwl_free_resp when done. 195 * response. The caller needs to call iwl_free_resp when done.
196 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
197 * command queue, but after other high priority commands. valid only
198 * with CMD_ASYNC.
199 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
200 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
201 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
202 * (i.e. mark it as non-idle).
196 */ 203 */
197enum CMD_MODE { 204enum CMD_MODE {
198 CMD_SYNC = 0, 205 CMD_SYNC = 0,
199 CMD_ASYNC = BIT(0), 206 CMD_ASYNC = BIT(0),
200 CMD_WANT_SKB = BIT(1), 207 CMD_WANT_SKB = BIT(1),
201 CMD_SEND_IN_RFKILL = BIT(2), 208 CMD_SEND_IN_RFKILL = BIT(2),
209 CMD_HIGH_PRIO = BIT(3),
210 CMD_SEND_IN_IDLE = BIT(4),
211 CMD_MAKE_TRANS_IDLE = BIT(5),
212 CMD_WAKE_UP_TRANS = BIT(6),
202}; 213};
203 214
204#define DEF_CMD_PAYLOAD_SIZE 320 215#define DEF_CMD_PAYLOAD_SIZE 320
@@ -335,6 +346,9 @@ enum iwl_d3_status {
335 * @STATUS_INT_ENABLED: interrupts are enabled 346 * @STATUS_INT_ENABLED: interrupts are enabled
336 * @STATUS_RFKILL: the HW RFkill switch is in KILL position 347 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
337 * @STATUS_FW_ERROR: the fw is in error state 348 * @STATUS_FW_ERROR: the fw is in error state
349 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
350 * are sent
351 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
338 */ 352 */
339enum iwl_trans_status { 353enum iwl_trans_status {
340 STATUS_SYNC_HCMD_ACTIVE, 354 STATUS_SYNC_HCMD_ACTIVE,
@@ -343,6 +357,8 @@ enum iwl_trans_status {
343 STATUS_INT_ENABLED, 357 STATUS_INT_ENABLED,
344 STATUS_RFKILL, 358 STATUS_RFKILL,
345 STATUS_FW_ERROR, 359 STATUS_FW_ERROR,
360 STATUS_TRANS_GOING_IDLE,
361 STATUS_TRANS_IDLE,
346}; 362};
347 363
348/** 364/**
@@ -377,7 +393,7 @@ struct iwl_trans_config {
377 bool rx_buf_size_8k; 393 bool rx_buf_size_8k;
378 bool bc_table_dword; 394 bool bc_table_dword;
379 unsigned int queue_watchdog_timeout; 395 unsigned int queue_watchdog_timeout;
380 const char **command_names; 396 const char *const *command_names;
381}; 397};
382 398
383struct iwl_trans; 399struct iwl_trans;
@@ -443,6 +459,11 @@ struct iwl_trans;
443 * @release_nic_access: let the NIC go to sleep. The "flags" parameter 459 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
444 * must be the same one that was sent before to the grab_nic_access. 460 * must be the same one that was sent before to the grab_nic_access.
445 * @set_bits_mask - set SRAM register according to value and mask. 461 * @set_bits_mask - set SRAM register according to value and mask.
462 * @ref: grab a reference to the transport/FW layers, disallowing
463 * certain low power states
464 * @unref: release a reference previously taken with @ref. Note that
465 * initially the reference count is 1, making an initial @unref
466 * necessary to allow low power states.
446 */ 467 */
447struct iwl_trans_ops { 468struct iwl_trans_ops {
448 469
@@ -489,6 +510,8 @@ struct iwl_trans_ops {
489 unsigned long *flags); 510 unsigned long *flags);
490 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, 511 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
491 u32 value); 512 u32 value);
513 void (*ref)(struct iwl_trans *trans);
514 void (*unref)(struct iwl_trans *trans);
492}; 515};
493 516
494/** 517/**
@@ -523,6 +546,7 @@ enum iwl_trans_state {
523 * starting the firmware, used for tracing 546 * starting the firmware, used for tracing
524 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the 547 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
525 * start of the 802.11 header in the @rx_mpdu_cmd 548 * start of the 802.11 header in the @rx_mpdu_cmd
549 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
526 */ 550 */
527struct iwl_trans { 551struct iwl_trans {
528 const struct iwl_trans_ops *ops; 552 const struct iwl_trans_ops *ops;
@@ -551,6 +575,8 @@ struct iwl_trans {
551 struct lockdep_map sync_cmd_lockdep_map; 575 struct lockdep_map sync_cmd_lockdep_map;
552#endif 576#endif
553 577
578 u64 dflt_pwr_limit;
579
554 /* pointer to trans specific struct */ 580 /* pointer to trans specific struct */
555 /*Ensure that this pointer will always be aligned to sizeof pointer */ 581 /*Ensure that this pointer will always be aligned to sizeof pointer */
556 char trans_specific[0] __aligned(sizeof(void *)); 582 char trans_specific[0] __aligned(sizeof(void *));
@@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
627 return trans->ops->d3_resume(trans, status, test); 653 return trans->ops->d3_resume(trans, status, test);
628} 654}
629 655
656static inline void iwl_trans_ref(struct iwl_trans *trans)
657{
658 if (trans->ops->ref)
659 trans->ops->ref(trans);
660}
661
662static inline void iwl_trans_unref(struct iwl_trans *trans)
663{
664 if (trans->ops->unref)
665 trans->ops->unref(trans);
666}
667
630static inline int iwl_trans_send_cmd(struct iwl_trans *trans, 668static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
631 struct iwl_host_cmd *cmd) 669 struct iwl_host_cmd *cmd)
632{ 670{
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index f98ec2b23898..ccdd3b7c4cce 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,8 +2,8 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o 2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o 3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o power_legacy.o bt-coex.o 5iwlmvm-y += power.o coex.o
6iwlmvm-y += led.o tt.o 6iwlmvm-y += led.o tt.o offloading.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o 8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
9 9
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 18a895a949d4..685f7e8e6943 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -61,9 +61,11 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#include <linux/ieee80211.h>
65#include <linux/etherdevice.h>
64#include <net/mac80211.h> 66#include <net/mac80211.h>
65 67
66#include "fw-api-bt-coex.h" 68#include "fw-api-coex.h"
67#include "iwl-modparams.h" 69#include "iwl-modparams.h"
68#include "mvm.h" 70#include "mvm.h"
69#include "iwl-debug.h" 71#include "iwl-debug.h"
@@ -305,6 +307,215 @@ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
305 cpu_to_le32(0x33113311), 307 cpu_to_le32(0x33113311),
306}; 308};
307 309
310struct corunning_block_luts {
311 u8 range;
312 __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
313};
314
315/*
316 * Ranges for the antenna coupling calibration / co-running block LUT:
317 * LUT0: [ 0, 12[
318 * LUT1: [12, 20[
319 * LUT2: [20, 21[
320 * LUT3: [21, 23[
321 * LUT4: [23, 27[
322 * LUT5: [27, 30[
323 * LUT6: [30, 32[
324 * LUT7: [32, 33[
325 * LUT8: [33, - [
326 */
327static const struct corunning_block_luts antenna_coupling_ranges[] = {
328 {
329 .range = 0,
330 .lut20 = {
331 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
332 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
333 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
334 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
335 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
336 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
337 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
338 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
339 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
340 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
341 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
342 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
343 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
344 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
345 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
346 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
347 },
348 },
349 {
350 .range = 12,
351 .lut20 = {
352 cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
353 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
354 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
355 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
356 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
357 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
358 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
359 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
360 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
361 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
362 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
363 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
364 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
365 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
366 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
367 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
368 },
369 },
370 {
371 .range = 20,
372 .lut20 = {
373 cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
374 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
375 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
376 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
377 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
378 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
379 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
380 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
381 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
382 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
383 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
384 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
385 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
386 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
387 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
388 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
389 },
390 },
391 {
392 .range = 21,
393 .lut20 = {
394 cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
395 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
396 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
397 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
398 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
399 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
400 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
401 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
402 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
403 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
404 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
405 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
406 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
407 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
408 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
409 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
410 },
411 },
412 {
413 .range = 23,
414 .lut20 = {
415 cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
416 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
417 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
418 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
419 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
420 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
421 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
422 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
423 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
424 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
425 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
426 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
427 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
428 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
429 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
430 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
431 },
432 },
433 {
434 .range = 27,
435 .lut20 = {
436 cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
437 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
438 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
439 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
440 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
441 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
442 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
443 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
444 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
445 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
446 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
447 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
448 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
449 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
450 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
451 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
452 },
453 },
454 {
455 .range = 30,
456 .lut20 = {
457 cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
458 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
459 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
460 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
461 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
462 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
463 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
464 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
465 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
466 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
467 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
468 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
469 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
470 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
471 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
472 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
473 },
474 },
475 {
476 .range = 32,
477 .lut20 = {
478 cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
479 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
480 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
481 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
482 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
483 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
484 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
485 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
486 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
487 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
488 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
489 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
490 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
491 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
492 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
493 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
494 },
495 },
496 {
497 .range = 33,
498 .lut20 = {
499 cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
500 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
501 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
502 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
503 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
504 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
505 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
506 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
507 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
508 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
509 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
510 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
511 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
512 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
513 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
514 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
515 },
516 },
517};
518
308static enum iwl_bt_coex_lut_type 519static enum iwl_bt_coex_lut_type
309iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) 520iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
310{ 521{
@@ -378,7 +589,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
378 589
379 flags = iwlwifi_mod_params.bt_coex_active ? 590 flags = iwlwifi_mod_params.bt_coex_active ?
380 BT_COEX_NW : BT_COEX_DISABLE; 591 BT_COEX_NW : BT_COEX_DISABLE;
381 flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
382 bt_cmd->flags = cpu_to_le32(flags); 592 bt_cmd->flags = cpu_to_le32(flags);
383 593
384 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE | 594 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
@@ -391,14 +601,26 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
391 BT_VALID_LUT | 601 BT_VALID_LUT |
392 BT_VALID_WIFI_RX_SW_PRIO_BOOST | 602 BT_VALID_WIFI_RX_SW_PRIO_BOOST |
393 BT_VALID_WIFI_TX_SW_PRIO_BOOST | 603 BT_VALID_WIFI_TX_SW_PRIO_BOOST |
394 BT_VALID_CORUN_LUT_20 |
395 BT_VALID_CORUN_LUT_40 |
396 BT_VALID_ANT_ISOLATION | 604 BT_VALID_ANT_ISOLATION |
397 BT_VALID_ANT_ISOLATION_THRS | 605 BT_VALID_ANT_ISOLATION_THRS |
398 BT_VALID_TXTX_DELTA_FREQ_THRS | 606 BT_VALID_TXTX_DELTA_FREQ_THRS |
399 BT_VALID_TXRX_MAX_FREQ_0 | 607 BT_VALID_TXRX_MAX_FREQ_0 |
400 BT_VALID_SYNC_TO_SCO); 608 BT_VALID_SYNC_TO_SCO);
401 609
610 if (IWL_MVM_BT_COEX_SYNC2SCO)
611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
612
613 if (IWL_MVM_BT_COEX_CORUNNING) {
614 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
615 BT_VALID_CORUN_LUT_40);
616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
617 }
618
619 if (IWL_MVM_BT_COEX_MPLUT) {
620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
621 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
622 }
623
402 if (mvm->cfg->bt_shared_single_ant) 624 if (mvm->cfg->bt_shared_single_ant)
403 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, 625 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
404 sizeof(iwl_single_shared_ant)); 626 sizeof(iwl_single_shared_ant));
@@ -406,6 +628,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
406 memcpy(&bt_cmd->decision_lut, iwl_combined_lookup, 628 memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
407 sizeof(iwl_combined_lookup)); 629 sizeof(iwl_combined_lookup));
408 630
631 /* Take first Co-running block LUT to get started */
632 memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
633 sizeof(bt_cmd->bt4_corun_lut20));
634 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
635 sizeof(bt_cmd->bt4_corun_lut40));
636
409 memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost, 637 memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
410 sizeof(iwl_bt_prio_boost)); 638 sizeof(iwl_bt_prio_boost));
411 memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut, 639 memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
@@ -489,36 +717,26 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
489 return ret; 717 return ret;
490} 718}
491 719
492static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, 720int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
493 bool enable)
494{ 721{
495 struct iwl_bt_coex_cmd *bt_cmd; 722 struct iwl_bt_coex_cmd *bt_cmd;
496 /* Send ASYNC since this can be sent from an atomic context */ 723 /* Send ASYNC since this can be sent from an atomic context */
497 struct iwl_host_cmd cmd = { 724 struct iwl_host_cmd cmd = {
498 .id = BT_CONFIG, 725 .id = BT_CONFIG,
499 .len = { sizeof(*bt_cmd), }, 726 .len = { sizeof(*bt_cmd), },
500 .dataflags = { IWL_HCMD_DFL_DUP, }, 727 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
501 .flags = CMD_ASYNC, 728 .flags = CMD_ASYNC,
502 }; 729 };
503
504 struct ieee80211_sta *sta;
505 struct iwl_mvm_sta *mvmsta; 730 struct iwl_mvm_sta *mvmsta;
506 int ret; 731 int ret;
507 732
508 if (sta_id == IWL_MVM_STATION_COUNT) 733 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
734 if (!mvmsta)
509 return 0; 735 return 0;
510 736
511 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
512 lockdep_is_held(&mvm->mutex));
513
514 /* This can happen if the station has been removed right now */
515 if (IS_ERR_OR_NULL(sta))
516 return 0;
517
518 mvmsta = iwl_mvm_sta_from_mac80211(sta);
519
520 /* nothing to do */ 737 /* nothing to do */
521 if (mvmsta->bt_reduced_txpower == enable) 738 if (mvmsta->bt_reduced_txpower_dbg ||
739 mvmsta->bt_reduced_txpower == enable)
522 return 0; 740 return 0;
523 741
524 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); 742 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -552,6 +770,7 @@ struct iwl_bt_iterator_data {
552 bool reduced_tx_power; 770 bool reduced_tx_power;
553 struct ieee80211_chanctx_conf *primary; 771 struct ieee80211_chanctx_conf *primary;
554 struct ieee80211_chanctx_conf *secondary; 772 struct ieee80211_chanctx_conf *secondary;
773 bool primary_ll;
555}; 774};
556 775
557static inline 776static inline
@@ -577,72 +796,113 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
577 struct iwl_mvm *mvm = data->mvm; 796 struct iwl_mvm *mvm = data->mvm;
578 struct ieee80211_chanctx_conf *chanctx_conf; 797 struct ieee80211_chanctx_conf *chanctx_conf;
579 enum ieee80211_smps_mode smps_mode; 798 enum ieee80211_smps_mode smps_mode;
799 u32 bt_activity_grading;
580 int ave_rssi; 800 int ave_rssi;
581 801
582 lockdep_assert_held(&mvm->mutex); 802 lockdep_assert_held(&mvm->mutex);
583 803
584 if (vif->type != NL80211_IFTYPE_STATION && 804 switch (vif->type) {
585 vif->type != NL80211_IFTYPE_AP) 805 case NL80211_IFTYPE_STATION:
586 return; 806 /* default smps_mode for BSS / P2P client is AUTOMATIC */
807 smps_mode = IEEE80211_SMPS_AUTOMATIC;
808 data->num_bss_ifaces++;
587 809
588 smps_mode = IEEE80211_SMPS_AUTOMATIC; 810 /*
811 * Count unassoc BSSes, relax SMSP constraints
812 * and disable reduced Tx Power
813 */
814 if (!vif->bss_conf.assoc) {
815 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
816 smps_mode);
817 if (iwl_mvm_bt_coex_reduced_txp(mvm,
818 mvmvif->ap_sta_id,
819 false))
820 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
821 return;
822 }
823 break;
824 case NL80211_IFTYPE_AP:
825 /* default smps_mode for AP / GO is OFF */
826 smps_mode = IEEE80211_SMPS_OFF;
827 if (!mvmvif->ap_ibss_active) {
828 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
829 smps_mode);
830 return;
831 }
832
833 /* the Ack / Cts kill mask must be default if AP / GO */
834 data->reduced_tx_power = false;
835 break;
836 default:
837 return;
838 }
589 839
590 chanctx_conf = rcu_dereference(vif->chanctx_conf); 840 chanctx_conf = rcu_dereference(vif->chanctx_conf);
591 841
592 /* If channel context is invalid or not on 2.4GHz .. */ 842 /* If channel context is invalid or not on 2.4GHz .. */
593 if ((!chanctx_conf || 843 if ((!chanctx_conf ||
594 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { 844 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
595 /* ... and it is an associated STATION, relax constraints */ 845 /* ... relax constraints and disable rssi events */
596 if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc) 846 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
597 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, 847 smps_mode);
598 smps_mode); 848 if (vif->type == NL80211_IFTYPE_STATION)
599 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); 849 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
600 return; 850 return;
601 } 851 }
602 852
603 /* SoftAP / GO will always be primary */ 853 bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
604 if (vif->type == NL80211_IFTYPE_AP) { 854 if (bt_activity_grading >= BT_HIGH_TRAFFIC)
605 if (!mvmvif->ap_ibss_active) 855 smps_mode = IEEE80211_SMPS_STATIC;
606 return; 856 else if (bt_activity_grading >= BT_LOW_TRAFFIC)
857 smps_mode = vif->type == NL80211_IFTYPE_AP ?
858 IEEE80211_SMPS_OFF :
859 IEEE80211_SMPS_DYNAMIC;
860 IWL_DEBUG_COEX(data->mvm,
861 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
862 mvmvif->id, data->notif->bt_status, bt_activity_grading,
863 smps_mode);
607 864
608 /* the Ack / Cts kill mask must be default if AP / GO */ 865 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
609 data->reduced_tx_power = false;
610 866
611 if (chanctx_conf == data->primary) 867 /* low latency is always primary */
612 return; 868 if (iwl_mvm_vif_low_latency(mvmvif)) {
869 data->primary_ll = true;
613 870
614 /* downgrade the current primary no matter what its type is */
615 data->secondary = data->primary; 871 data->secondary = data->primary;
616 data->primary = chanctx_conf; 872 data->primary = chanctx_conf;
617 return;
618 } 873 }
619 874
620 data->num_bss_ifaces++; 875 if (vif->type == NL80211_IFTYPE_AP) {
876 if (!mvmvif->ap_ibss_active)
877 return;
621 878
622 /* we are now a STA / P2P Client, and take associated ones only */ 879 if (chanctx_conf == data->primary)
623 if (!vif->bss_conf.assoc) 880 return;
881
882 if (!data->primary_ll) {
883 /*
884 * downgrade the current primary no matter what its
885 * type is.
886 */
887 data->secondary = data->primary;
888 data->primary = chanctx_conf;
889 } else {
890 /* there is low latency vif - we will be secondary */
891 data->secondary = chanctx_conf;
892 }
624 return; 893 return;
894 }
625 895
626 /* STA / P2P Client, try to be primary if first vif */ 896 /*
897 * STA / P2P Client, try to be primary if first vif. If we are in low
898 * latency mode, we are already in primary and just don't do much
899 */
627 if (!data->primary || data->primary == chanctx_conf) 900 if (!data->primary || data->primary == chanctx_conf)
628 data->primary = chanctx_conf; 901 data->primary = chanctx_conf;
629 else if (!data->secondary) 902 else if (!data->secondary)
630 /* if secondary is not NULL, it might be a GO */ 903 /* if secondary is not NULL, it might be a GO */
631 data->secondary = chanctx_conf; 904 data->secondary = chanctx_conf;
632 905
633 if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
634 smps_mode = IEEE80211_SMPS_STATIC;
635 else if (le32_to_cpu(data->notif->bt_activity_grading) >=
636 BT_LOW_TRAFFIC)
637 smps_mode = IEEE80211_SMPS_DYNAMIC;
638
639 IWL_DEBUG_COEX(data->mvm,
640 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
641 mvmvif->id, data->notif->bt_status,
642 data->notif->bt_activity_grading, smps_mode);
643
644 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
645
646 /* don't reduce the Tx power if in loose scheme */ 906 /* don't reduce the Tx power if in loose scheme */
647 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || 907 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
648 mvm->cfg->bt_shared_single_ant) { 908 mvm->cfg->bt_shared_single_ant) {
@@ -918,8 +1178,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
918#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) 1178#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
919#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) 1179#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
920 1180
921u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm, 1181u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
922 struct ieee80211_sta *sta) 1182 struct ieee80211_sta *sta)
923{ 1183{
924 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1184 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
925 enum iwl_bt_coex_lut_type lut_type; 1185 enum iwl_bt_coex_lut_type lut_type;
@@ -955,6 +1215,38 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
955 return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT; 1215 return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
956} 1216}
957 1217
1218u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1219 struct ieee80211_tx_info *info, u8 ac)
1220{
1221 __le16 fc = hdr->frame_control;
1222
1223 if (info->band != IEEE80211_BAND_2GHZ)
1224 return 0;
1225
1226 if (unlikely(mvm->bt_tx_prio))
1227 return mvm->bt_tx_prio - 1;
1228
1229 /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
1230 if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
1231 is_multicast_ether_addr(hdr->addr1) ||
1232 ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
1233 ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
1234 return 3;
1235
1236 switch (ac) {
1237 case IEEE80211_AC_BE:
1238 return 1;
1239 case IEEE80211_AC_VO:
1240 return 3;
1241 case IEEE80211_AC_VI:
1242 return 2;
1243 default:
1244 break;
1245 }
1246
1247 return 0;
1248}
1249
958void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) 1250void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
959{ 1251{
960 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX)) 1252 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
@@ -962,3 +1254,69 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
962 1254
963 iwl_mvm_bt_coex_notif_handle(mvm); 1255 iwl_mvm_bt_coex_notif_handle(mvm);
964} 1256}
1257
1258int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1259 struct iwl_rx_cmd_buffer *rxb,
1260 struct iwl_device_cmd *dev_cmd)
1261{
1262 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1263 u32 ant_isolation = le32_to_cpup((void *)pkt->data);
1264 u8 __maybe_unused lower_bound, upper_bound;
1265 u8 lut;
1266
1267 struct iwl_bt_coex_cmd *bt_cmd;
1268 struct iwl_host_cmd cmd = {
1269 .id = BT_CONFIG,
1270 .len = { sizeof(*bt_cmd), },
1271 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1272 .flags = CMD_SYNC,
1273 };
1274
1275 if (!IWL_MVM_BT_COEX_CORUNNING)
1276 return 0;
1277
1278 lockdep_assert_held(&mvm->mutex);
1279
1280 if (ant_isolation == mvm->last_ant_isol)
1281 return 0;
1282
1283 for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
1284 if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
1285 break;
1286
1287 lower_bound = antenna_coupling_ranges[lut].range;
1288
1289 if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
1290 upper_bound = antenna_coupling_ranges[lut + 1].range;
1291 else
1292 upper_bound = antenna_coupling_ranges[lut].range;
1293
1294 IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
1295 ant_isolation, lower_bound, upper_bound, lut);
1296
1297 mvm->last_ant_isol = ant_isolation;
1298
1299 if (mvm->last_corun_lut == lut)
1300 return 0;
1301
1302 mvm->last_corun_lut = lut;
1303
1304 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
1305 if (!bt_cmd)
1306 return 0;
1307 cmd.data[0] = bt_cmd;
1308
1309 bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
1310 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
1311 BT_VALID_CORUN_LUT_20 |
1312 BT_VALID_CORUN_LUT_40);
1313
1314 /* For the moment, use the same LUT for 20GHz and 40GHz */
1315 memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
1316 sizeof(bt_cmd->bt4_corun_lut20));
1317
1318 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
1319 sizeof(bt_cmd->bt4_corun_lut40));
1320
1321 return 0;
1322}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 036857698565..51685693af2e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -78,5 +78,9 @@
78#define IWL_MVM_PS_SNOOZE_INTERVAL 25 78#define IWL_MVM_PS_SNOOZE_INTERVAL 25
79#define IWL_MVM_PS_SNOOZE_WINDOW 50 79#define IWL_MVM_PS_SNOOZE_WINDOW 50
80#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25 80#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
81#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
82#define IWL_MVM_BT_COEX_SYNC2SCO 1
83#define IWL_MVM_BT_COEX_CORUNNING 1
84#define IWL_MVM_BT_COEX_MPLUT 1
81 85
82#endif /* __MVM_CONSTANTS_H */ 86#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index f36a7ee0267f..e56f5a0edf85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -376,139 +376,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
376 return err; 376 return err;
377} 377}
378 378
379static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
380 struct ieee80211_vif *vif)
381{
382 union {
383 struct iwl_proto_offload_cmd_v1 v1;
384 struct iwl_proto_offload_cmd_v2 v2;
385 struct iwl_proto_offload_cmd_v3_small v3s;
386 struct iwl_proto_offload_cmd_v3_large v3l;
387 } cmd = {};
388 struct iwl_host_cmd hcmd = {
389 .id = PROT_OFFLOAD_CONFIG_CMD,
390 .flags = CMD_SYNC,
391 .data[0] = &cmd,
392 .dataflags[0] = IWL_HCMD_DFL_DUP,
393 };
394 struct iwl_proto_offload_cmd_common *common;
395 u32 enabled = 0, size;
396 u32 capa_flags = mvm->fw->ucode_capa.flags;
397#if IS_ENABLED(CONFIG_IPV6)
398 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
399 int i;
400
401 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
402 capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
403 struct iwl_ns_config *nsc;
404 struct iwl_targ_addr *addrs;
405 int n_nsc, n_addrs;
406 int c;
407
408 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
409 nsc = cmd.v3s.ns_config;
410 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
411 addrs = cmd.v3s.targ_addrs;
412 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
413 } else {
414 nsc = cmd.v3l.ns_config;
415 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
416 addrs = cmd.v3l.targ_addrs;
417 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
418 }
419
420 if (mvmvif->num_target_ipv6_addrs)
421 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
422
423 /*
424 * For each address we have (and that will fit) fill a target
425 * address struct and combine for NS offload structs with the
426 * solicited node addresses.
427 */
428 for (i = 0, c = 0;
429 i < mvmvif->num_target_ipv6_addrs &&
430 i < n_addrs && c < n_nsc; i++) {
431 struct in6_addr solicited_addr;
432 int j;
433
434 addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
435 &solicited_addr);
436 for (j = 0; j < c; j++)
437 if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
438 &solicited_addr) == 0)
439 break;
440 if (j == c)
441 c++;
442 addrs[i].addr = mvmvif->target_ipv6_addrs[i];
443 addrs[i].config_num = cpu_to_le32(j);
444 nsc[j].dest_ipv6_addr = solicited_addr;
445 memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
446 }
447
448 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
449 cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
450 else
451 cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
452 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
453 if (mvmvif->num_target_ipv6_addrs) {
454 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
455 memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
456 }
457
458 BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
459 sizeof(mvmvif->target_ipv6_addrs[0]));
460
461 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
462 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
463 memcpy(cmd.v2.target_ipv6_addr[i],
464 &mvmvif->target_ipv6_addrs[i],
465 sizeof(cmd.v2.target_ipv6_addr[i]));
466 } else {
467 if (mvmvif->num_target_ipv6_addrs) {
468 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
469 memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
470 }
471
472 BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
473 sizeof(mvmvif->target_ipv6_addrs[0]));
474
475 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
476 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
477 memcpy(cmd.v1.target_ipv6_addr[i],
478 &mvmvif->target_ipv6_addrs[i],
479 sizeof(cmd.v1.target_ipv6_addr[i]));
480 }
481#endif
482
483 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
484 common = &cmd.v3s.common;
485 size = sizeof(cmd.v3s);
486 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
487 common = &cmd.v3l.common;
488 size = sizeof(cmd.v3l);
489 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
490 common = &cmd.v2.common;
491 size = sizeof(cmd.v2);
492 } else {
493 common = &cmd.v1.common;
494 size = sizeof(cmd.v1);
495 }
496
497 if (vif->bss_conf.arp_addr_cnt) {
498 enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
499 common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
500 memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
501 }
502
503 if (!enabled)
504 return 0;
505
506 common->enabled = cpu_to_le32(enabled);
507
508 hcmd.len[0] = size;
509 return iwl_mvm_send_cmd(mvm, &hcmd);
510}
511
512enum iwl_mvm_tcp_packet_type { 379enum iwl_mvm_tcp_packet_type {
513 MVM_TCP_TX_SYN, 380 MVM_TCP_TX_SYN,
514 MVM_TCP_RX_SYNACK, 381 MVM_TCP_RX_SYNACK,
@@ -846,8 +713,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
846 quota_cmd.quotas[0].id_and_color = 713 quota_cmd.quotas[0].id_and_color =
847 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 714 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
848 mvmvif->phy_ctxt->color)); 715 mvmvif->phy_ctxt->color));
849 quota_cmd.quotas[0].quota = cpu_to_le32(100); 716 quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
850 quota_cmd.quotas[0].max_duration = cpu_to_le32(1000); 717 quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
851 718
852 for (i = 1; i < MAX_BINDINGS; i++) 719 for (i = 1; i < MAX_BINDINGS; i++)
853 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); 720 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
@@ -927,6 +794,20 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
927 IWL_ERR(mvm, "failed to set non-QoS seqno\n"); 794 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
928} 795}
929 796
797static int
798iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
799 const struct iwl_wowlan_config_cmd_v3 *cmd)
800{
801 /* start only with the v2 part of the command */
802 u16 cmd_len = sizeof(cmd->common);
803
804 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
805 cmd_len = sizeof(*cmd);
806
807 return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
808 cmd_len, cmd);
809}
810
930static int __iwl_mvm_suspend(struct ieee80211_hw *hw, 811static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
931 struct cfg80211_wowlan *wowlan, 812 struct cfg80211_wowlan *wowlan,
932 bool test) 813 bool test)
@@ -939,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
939 struct iwl_mvm_vif *mvmvif; 820 struct iwl_mvm_vif *mvmvif;
940 struct ieee80211_sta *ap_sta; 821 struct ieee80211_sta *ap_sta;
941 struct iwl_mvm_sta *mvm_ap_sta; 822 struct iwl_mvm_sta *mvm_ap_sta;
942 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 823 struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
943 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; 824 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
944 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 825 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
945 struct iwl_d3_manager_config d3_cfg_cmd_data = { 826 struct iwl_d3_manager_config d3_cfg_cmd_data = {
@@ -961,9 +842,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
961 .tkip = &tkip_cmd, 842 .tkip = &tkip_cmd,
962 .use_tkip = false, 843 .use_tkip = false,
963 }; 844 };
964 int ret, i; 845 int ret;
965 int len __maybe_unused; 846 int len __maybe_unused;
966 u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
967 847
968 if (!wowlan) { 848 if (!wowlan) {
969 /* 849 /*
@@ -980,8 +860,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
980 860
981 mutex_lock(&mvm->mutex); 861 mutex_lock(&mvm->mutex);
982 862
983 old_aux_sta_id = mvm->aux_sta.sta_id;
984
985 /* see if there's only a single BSS vif and it's associated */ 863 /* see if there's only a single BSS vif and it's associated */
986 ieee80211_iterate_active_interfaces_atomic( 864 ieee80211_iterate_active_interfaces_atomic(
987 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 865 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1005,49 +883,41 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1005 883
1006 mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; 884 mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
1007 885
1008 /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */ 886 /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
1009 887
1010 wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported; 888 wowlan_config_cmd.common.is_11n_connection =
889 ap_sta->ht_cap.ht_supported;
1011 890
1012 /* Query the last used seqno and set it */ 891 /* Query the last used seqno and set it */
1013 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); 892 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
1014 if (ret < 0) 893 if (ret < 0)
1015 goto out_noreset; 894 goto out_noreset;
1016 wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret); 895 wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
1017 896
1018 /* 897 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
1019 * For QoS counters, we store the one to use next, so subtract 0x10
1020 * since the uCode will add 0x10 *before* using the value while we
1021 * increment after using the value (i.e. store the next value to use).
1022 */
1023 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1024 u16 seq = mvm_ap_sta->tid_data[i].seq_number;
1025 seq -= 0x10;
1026 wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
1027 }
1028 898
1029 if (wowlan->disconnect) 899 if (wowlan->disconnect)
1030 wowlan_config_cmd.wakeup_filter |= 900 wowlan_config_cmd.common.wakeup_filter |=
1031 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | 901 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
1032 IWL_WOWLAN_WAKEUP_LINK_CHANGE); 902 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
1033 if (wowlan->magic_pkt) 903 if (wowlan->magic_pkt)
1034 wowlan_config_cmd.wakeup_filter |= 904 wowlan_config_cmd.common.wakeup_filter |=
1035 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); 905 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
1036 if (wowlan->gtk_rekey_failure) 906 if (wowlan->gtk_rekey_failure)
1037 wowlan_config_cmd.wakeup_filter |= 907 wowlan_config_cmd.common.wakeup_filter |=
1038 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); 908 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
1039 if (wowlan->eap_identity_req) 909 if (wowlan->eap_identity_req)
1040 wowlan_config_cmd.wakeup_filter |= 910 wowlan_config_cmd.common.wakeup_filter |=
1041 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); 911 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
1042 if (wowlan->four_way_handshake) 912 if (wowlan->four_way_handshake)
1043 wowlan_config_cmd.wakeup_filter |= 913 wowlan_config_cmd.common.wakeup_filter |=
1044 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); 914 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
1045 if (wowlan->n_patterns) 915 if (wowlan->n_patterns)
1046 wowlan_config_cmd.wakeup_filter |= 916 wowlan_config_cmd.common.wakeup_filter |=
1047 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); 917 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
1048 918
1049 if (wowlan->rfkill_release) 919 if (wowlan->rfkill_release)
1050 wowlan_config_cmd.wakeup_filter |= 920 wowlan_config_cmd.common.wakeup_filter |=
1051 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); 921 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
1052 922
1053 if (wowlan->tcp) { 923 if (wowlan->tcp) {
@@ -1055,7 +925,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1055 * Set the "link change" (really "link lost") flag as well 925 * Set the "link change" (really "link lost") flag as well
1056 * since that implies losing the TCP connection. 926 * since that implies losing the TCP connection.
1057 */ 927 */
1058 wowlan_config_cmd.wakeup_filter |= 928 wowlan_config_cmd.common.wakeup_filter |=
1059 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | 929 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
1060 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | 930 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
1061 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | 931 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
@@ -1067,16 +937,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1067 iwl_trans_stop_device(mvm->trans); 937 iwl_trans_stop_device(mvm->trans);
1068 938
1069 /* 939 /*
1070 * The D3 firmware still hardcodes the AP station ID for the
1071 * BSS we're associated with as 0. Store the real STA ID here
1072 * and assign 0. When we leave this function, we'll restore
1073 * the original value for the resume code.
1074 */
1075 old_ap_sta_id = mvm_ap_sta->sta_id;
1076 mvm_ap_sta->sta_id = 0;
1077 mvmvif->ap_sta_id = 0;
1078
1079 /*
1080 * Set the HW restart bit -- this is mostly true as we're 940 * Set the HW restart bit -- this is mostly true as we're
1081 * going to load new firmware and reprogram that, though 941 * going to load new firmware and reprogram that, though
1082 * the reprogramming is going to be manual to avoid adding 942 * the reprogramming is going to be manual to avoid adding
@@ -1096,16 +956,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1096 mvm->ptk_ivlen = 0; 956 mvm->ptk_ivlen = 0;
1097 mvm->ptk_icvlen = 0; 957 mvm->ptk_icvlen = 0;
1098 958
1099 /*
1100 * The D3 firmware still hardcodes the AP station ID for the
1101 * BSS we're associated with as 0. As a result, we have to move
1102 * the auxiliary station to ID 1 so the ID 0 remains free for
1103 * the AP station for later.
1104 * We set the sta_id to 1 here, and reset it to its previous
1105 * value (that we stored above) later.
1106 */
1107 mvm->aux_sta.sta_id = 1;
1108
1109 ret = iwl_mvm_load_d3_fw(mvm); 959 ret = iwl_mvm_load_d3_fw(mvm);
1110 if (ret) 960 if (ret)
1111 goto out; 961 goto out;
@@ -1173,9 +1023,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1173 } 1023 }
1174 } 1024 }
1175 1025
1176 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 1026 ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
1177 CMD_SYNC, sizeof(wowlan_config_cmd),
1178 &wowlan_config_cmd);
1179 if (ret) 1027 if (ret)
1180 goto out; 1028 goto out;
1181 1029
@@ -1183,7 +1031,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1183 if (ret) 1031 if (ret)
1184 goto out; 1032 goto out;
1185 1033
1186 ret = iwl_mvm_send_proto_offload(mvm, vif); 1034 ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
1187 if (ret) 1035 if (ret)
1188 goto out; 1036 goto out;
1189 1037
@@ -1191,11 +1039,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1191 if (ret) 1039 if (ret)
1192 goto out; 1040 goto out;
1193 1041
1194 ret = iwl_mvm_power_update_device_mode(mvm); 1042 ret = iwl_mvm_power_update_device(mvm);
1195 if (ret) 1043 if (ret)
1196 goto out; 1044 goto out;
1197 1045
1198 ret = iwl_mvm_power_update_mode(mvm, vif); 1046 ret = iwl_mvm_power_update_mac(mvm, vif);
1199 if (ret) 1047 if (ret)
1200 goto out; 1048 goto out;
1201 1049
@@ -1222,10 +1070,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1222 1070
1223 iwl_trans_d3_suspend(mvm->trans, test); 1071 iwl_trans_d3_suspend(mvm->trans, test);
1224 out: 1072 out:
1225 mvm->aux_sta.sta_id = old_aux_sta_id;
1226 mvm_ap_sta->sta_id = old_ap_sta_id;
1227 mvmvif->ap_sta_id = old_ap_sta_id;
1228
1229 if (ret < 0) 1073 if (ret < 0)
1230 ieee80211_restart_hw(mvm->hw); 1074 ieee80211_restart_hw(mvm->hw);
1231 out_noreset: 1075 out_noreset:
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 0e29cd83a06a..9b59e1d7ae71 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
185 185
186 mutex_lock(&mvm->mutex); 186 mutex_lock(&mvm->mutex);
187 iwl_dbgfs_update_pm(mvm, vif, param, val); 187 iwl_dbgfs_update_pm(mvm, vif, param, val);
188 ret = iwl_mvm_power_update_mode(mvm, vif); 188 ret = iwl_mvm_power_update_mac(mvm, vif);
189 mutex_unlock(&mvm->mutex); 189 mutex_unlock(&mvm->mutex);
190 190
191 return ret ?: count; 191 return ret ?: count;
@@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
202 int bufsz = sizeof(buf); 202 int bufsz = sizeof(buf);
203 int pos; 203 int pos;
204 204
205 pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz); 205 pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
206 206
207 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 207 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
208} 208}
@@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
225 225
226 ap_sta_id = mvmvif->ap_sta_id; 226 ap_sta_id = mvmvif->ap_sta_id;
227 227
228 switch (ieee80211_vif_type_p2p(vif)) {
229 case NL80211_IFTYPE_ADHOC:
230 pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
231 break;
232 case NL80211_IFTYPE_STATION:
233 pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
234 break;
235 case NL80211_IFTYPE_AP:
236 pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
237 break;
238 case NL80211_IFTYPE_P2P_CLIENT:
239 pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
240 break;
241 case NL80211_IFTYPE_P2P_GO:
242 pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
243 break;
244 case NL80211_IFTYPE_P2P_DEVICE:
245 pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
246 break;
247 default:
248 break;
249 }
250
228 pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n", 251 pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
229 mvmvif->id, mvmvif->color); 252 mvmvif->id, mvmvif->color);
230 pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", 253 pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
249 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 272 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
250 273
251 pos += scnprintf(buf+pos, bufsz-pos, 274 pos += scnprintf(buf+pos, bufsz-pos,
252 "ap_sta_id %d - reduced Tx power %d\n", 275 "ap_sta_id %d - reduced Tx power %d force %d\n",
253 ap_sta_id, 276 ap_sta_id,
254 mvm_sta->bt_reduced_txpower); 277 mvm_sta->bt_reduced_txpower,
278 mvm_sta->bt_reduced_txpower_dbg);
255 } 279 }
256 } 280 }
257 281
@@ -269,6 +293,41 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
269 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 293 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
270} 294}
271 295
296static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
297 char *buf, size_t count,
298 loff_t *ppos)
299{
300 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
301 struct iwl_mvm *mvm = mvmvif->mvm;
302 struct iwl_mvm_sta *mvmsta;
303 bool reduced_tx_power;
304 int ret;
305
306 if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
307 return -ENOTCONN;
308
309 if (strtobool(buf, &reduced_tx_power) != 0)
310 return -EINVAL;
311
312 mutex_lock(&mvm->mutex);
313
314 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
315 if (IS_ERR_OR_NULL(mvmsta)) {
316 mutex_unlock(&mvm->mutex);
317 return -ENOTCONN;
318 }
319
320 mvmsta->bt_reduced_txpower_dbg = false;
321 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
322 reduced_tx_power);
323 if (!ret)
324 mvmsta->bt_reduced_txpower_dbg = true;
325
326 mutex_unlock(&mvm->mutex);
327
328 return ret ? : count;
329}
330
272static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, 331static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
273 enum iwl_dbgfs_bf_mask param, int value) 332 enum iwl_dbgfs_bf_mask param, int value)
274{ 333{
@@ -403,9 +462,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
403 mutex_lock(&mvm->mutex); 462 mutex_lock(&mvm->mutex);
404 iwl_dbgfs_update_bf(vif, param, value); 463 iwl_dbgfs_update_bf(vif, param, value);
405 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) 464 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
406 ret = iwl_mvm_disable_beacon_filter(mvm, vif); 465 ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
407 else 466 else
408 ret = iwl_mvm_enable_beacon_filter(mvm, vif); 467 ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
409 mutex_unlock(&mvm->mutex); 468 mutex_unlock(&mvm->mutex);
410 469
411 return ret ?: count; 470 return ret ?: count;
@@ -460,6 +519,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
460 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 519 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
461} 520}
462 521
522static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
523 size_t count, loff_t *ppos)
524{
525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
526 struct iwl_mvm *mvm = mvmvif->mvm;
527 u8 value;
528 int ret;
529
530 ret = kstrtou8(buf, 0, &value);
531 if (ret)
532 return ret;
533 if (value > 1)
534 return -EINVAL;
535
536 mutex_lock(&mvm->mutex);
537 iwl_mvm_update_low_latency(mvm, vif, value);
538 mutex_unlock(&mvm->mutex);
539
540 return count;
541}
542
543static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
544 char __user *user_buf,
545 size_t count, loff_t *ppos)
546{
547 struct ieee80211_vif *vif = file->private_data;
548 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
549 char buf[3];
550
551 buf[0] = mvmvif->low_latency ? '1' : '0';
552 buf[1] = '\n';
553 buf[2] = '\0';
554 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
555}
556
463#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 557#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
464 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) 558 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
465#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ 559#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -473,6 +567,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
473MVM_DEBUGFS_READ_FILE_OPS(mac_params); 567MVM_DEBUGFS_READ_FILE_OPS(mac_params);
474MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); 568MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
475MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); 569MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
570MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
571MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
476 572
477void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 573void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
478{ 574{
@@ -496,15 +592,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
496 return; 592 return;
497 } 593 }
498 594
499 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && 595 if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
596 iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
500 ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || 597 ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
501 (vif->type == NL80211_IFTYPE_STATION && vif->p2p && 598 (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
502 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))) 599 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
503 MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | 600 MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
504 S_IRUSR); 601 S_IRUSR);
505 602
506 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 603 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
507 S_IRUSR); 604 MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
605 MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
606 S_IRUSR | S_IWUSR);
508 607
509 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 608 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
510 mvmvif == mvm->bf_allowed_vif) 609 mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 369d4c90e669..1b52deea6081 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -60,11 +60,14 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/vmalloc.h>
64
63#include "mvm.h" 65#include "mvm.h"
64#include "sta.h" 66#include "sta.h"
65#include "iwl-io.h" 67#include "iwl-io.h"
66#include "iwl-prph.h" 68#include "iwl-prph.h"
67#include "debugfs.h" 69#include "debugfs.h"
70#include "fw-error-dump.h"
68 71
69static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, 72static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
70 size_t count, loff_t *ppos) 73 size_t count, loff_t *ppos)
@@ -90,7 +93,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
90static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, 93static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
91 size_t count, loff_t *ppos) 94 size_t count, loff_t *ppos)
92{ 95{
93 struct ieee80211_sta *sta; 96 struct iwl_mvm_sta *mvmsta;
94 int sta_id, drain, ret; 97 int sta_id, drain, ret;
95 98
96 if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) 99 if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@@ -105,19 +108,63 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
105 108
106 mutex_lock(&mvm->mutex); 109 mutex_lock(&mvm->mutex);
107 110
108 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 111 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
109 lockdep_is_held(&mvm->mutex)); 112
110 if (IS_ERR_OR_NULL(sta)) 113 if (!mvmsta)
111 ret = -ENOENT; 114 ret = -ENOENT;
112 else 115 else
113 ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? : 116 ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
114 count;
115 117
116 mutex_unlock(&mvm->mutex); 118 mutex_unlock(&mvm->mutex);
117 119
118 return ret; 120 return ret;
119} 121}
120 122
123static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
124{
125 struct iwl_mvm *mvm = inode->i_private;
126 int ret;
127
128 if (!mvm)
129 return -EINVAL;
130
131 mutex_lock(&mvm->mutex);
132 if (!mvm->fw_error_dump) {
133 ret = -ENODATA;
134 goto out;
135 }
136
137 file->private_data = mvm->fw_error_dump;
138 mvm->fw_error_dump = NULL;
139 kfree(mvm->fw_error_sram);
140 mvm->fw_error_sram = NULL;
141 mvm->fw_error_sram_len = 0;
142 ret = 0;
143
144out:
145 mutex_unlock(&mvm->mutex);
146 return ret;
147}
148
149static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
150 char __user *user_buf,
151 size_t count, loff_t *ppos)
152{
153 struct iwl_fw_error_dump_file *dump_file = file->private_data;
154
155 return simple_read_from_buffer(user_buf, count, ppos,
156 dump_file,
157 le32_to_cpu(dump_file->file_len));
158}
159
160static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
161 struct file *file)
162{
163 vfree(file->private_data);
164
165 return 0;
166}
167
121static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, 168static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos) 169 size_t count, loff_t *ppos)
123{ 170{
@@ -251,7 +298,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
251 } 298 }
252 299
253 mutex_lock(&mvm->mutex); 300 mutex_lock(&mvm->mutex);
254 ret = iwl_mvm_power_update_device_mode(mvm); 301 ret = iwl_mvm_power_update_device(mvm);
255 mutex_unlock(&mvm->mutex); 302 mutex_unlock(&mvm->mutex);
256 303
257 return ret ?: count; 304 return ret ?: count;
@@ -351,6 +398,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
351 le32_to_cpu(notif->secondary_ch_lut)); 398 le32_to_cpu(notif->secondary_ch_lut));
352 pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n", 399 pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
353 le32_to_cpu(notif->bt_activity_grading)); 400 le32_to_cpu(notif->bt_activity_grading));
401 pos += scnprintf(buf+pos, bufsz-pos,
402 "antenna isolation = %d CORUN LUT index = %d\n",
403 mvm->last_ant_isol, mvm->last_corun_lut);
354 404
355 mutex_unlock(&mvm->mutex); 405 mutex_unlock(&mvm->mutex);
356 406
@@ -393,6 +443,22 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
393 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 443 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
394} 444}
395 445
446static ssize_t
447iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
448 size_t count, loff_t *ppos)
449{
450 u32 bt_tx_prio;
451
452 if (sscanf(buf, "%u", &bt_tx_prio) != 1)
453 return -EINVAL;
454 if (bt_tx_prio > 4)
455 return -EINVAL;
456
457 mvm->bt_tx_prio = bt_tx_prio;
458
459 return count;
460}
461
396#define PRINT_STATS_LE32(_str, _val) \ 462#define PRINT_STATS_LE32(_str, _val) \
397 pos += scnprintf(buf + pos, bufsz - pos, \ 463 pos += scnprintf(buf + pos, bufsz - pos, \
398 fmt_table, _str, \ 464 fmt_table, _str, \
@@ -532,6 +598,80 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
532} 598}
533#undef PRINT_STAT_LE32 599#undef PRINT_STAT_LE32
534 600
601static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
602 char __user *user_buf, size_t count,
603 loff_t *ppos,
604 struct iwl_mvm_frame_stats *stats)
605{
606 char *buff, *pos, *endpos;
607 int idx, i;
608 int ret;
609 static const size_t bufsz = 1024;
610
611 buff = kmalloc(bufsz, GFP_KERNEL);
612 if (!buff)
613 return -ENOMEM;
614
615 spin_lock_bh(&mvm->drv_stats_lock);
616
617 pos = buff;
618 endpos = pos + bufsz;
619
620 pos += scnprintf(pos, endpos - pos,
621 "Legacy/HT/VHT\t:\t%d/%d/%d\n",
622 stats->legacy_frames,
623 stats->ht_frames,
624 stats->vht_frames);
625 pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
626 stats->bw_20_frames,
627 stats->bw_40_frames,
628 stats->bw_80_frames);
629 pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
630 stats->ngi_frames,
631 stats->sgi_frames);
632 pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
633 stats->siso_frames,
634 stats->mimo2_frames);
635 pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
636 stats->fail_frames,
637 stats->success_frames);
638 pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
639 stats->agg_frames);
640 pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
641 stats->ampdu_count);
642 pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
643 stats->ampdu_count > 0 ?
644 (stats->agg_frames / stats->ampdu_count) : 0);
645
646 pos += scnprintf(pos, endpos - pos, "Last Rates\n");
647
648 idx = stats->last_frame_idx - 1;
649 for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
650 idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
651 if (stats->last_rates[idx] == 0)
652 continue;
653 pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
654 (int)(ARRAY_SIZE(stats->last_rates) - i));
655 pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
656 }
657 spin_unlock_bh(&mvm->drv_stats_lock);
658
659 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
660 kfree(buff);
661
662 return ret;
663}
664
665static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
666 char __user *user_buf, size_t count,
667 loff_t *ppos)
668{
669 struct iwl_mvm *mvm = file->private_data;
670
671 return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
672 &mvm->drv_rx_stats);
673}
674
535static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, 675static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
536 size_t count, loff_t *ppos) 676 size_t count, loff_t *ppos)
537{ 677{
@@ -592,7 +732,7 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
592 return -EINVAL; 732 return -EINVAL;
593 if (scan_rx_ant > ANT_ABC) 733 if (scan_rx_ant > ANT_ABC)
594 return -EINVAL; 734 return -EINVAL;
595 if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw)) 735 if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
596 return -EINVAL; 736 return -EINVAL;
597 737
598 mvm->scan_rx_ant = scan_rx_ant; 738 mvm->scan_rx_ant = scan_rx_ant;
@@ -600,6 +740,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
600 return count; 740 return count;
601} 741}
602 742
743#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
744#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
745static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
746 char __user *user_buf,
747 size_t count, loff_t *ppos)
748{
749 struct iwl_mvm *mvm = file->private_data;
750 struct iwl_bcast_filter_cmd cmd;
751 const struct iwl_fw_bcast_filter *filter;
752 char *buf;
753 int bufsz = 1024;
754 int i, j, pos = 0;
755 ssize_t ret;
756
757 buf = kzalloc(bufsz, GFP_KERNEL);
758 if (!buf)
759 return -ENOMEM;
760
761 mutex_lock(&mvm->mutex);
762 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
763 ADD_TEXT("None\n");
764 mutex_unlock(&mvm->mutex);
765 goto out;
766 }
767 mutex_unlock(&mvm->mutex);
768
769 for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
770 filter = &cmd.filters[i];
771
772 ADD_TEXT("Filter [%d]:\n", i);
773 ADD_TEXT("\tDiscard=%d\n", filter->discard);
774 ADD_TEXT("\tFrame Type: %s\n",
775 filter->frame_type ? "IPv4" : "Generic");
776
777 for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
778 const struct iwl_fw_bcast_filter_attr *attr;
779
780 attr = &filter->attrs[j];
781 if (!attr->mask)
782 break;
783
784 ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
785 j, attr->offset,
786 attr->offset_type ? "IP End" :
787 "Payload Start",
788 be32_to_cpu(attr->mask),
789 be32_to_cpu(attr->val),
790 le16_to_cpu(attr->reserved1));
791 }
792 }
793out:
794 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
795 kfree(buf);
796 return ret;
797}
798
799static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
800 size_t count, loff_t *ppos)
801{
802 int pos, next_pos;
803 struct iwl_fw_bcast_filter filter = {};
804 struct iwl_bcast_filter_cmd cmd;
805 u32 filter_id, attr_id, mask, value;
806 int err = 0;
807
808 if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
809 &filter.frame_type, &pos) != 3)
810 return -EINVAL;
811
812 if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
813 filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
814 return -EINVAL;
815
816 for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
817 attr_id++) {
818 struct iwl_fw_bcast_filter_attr *attr =
819 &filter.attrs[attr_id];
820
821 if (pos >= count)
822 break;
823
824 if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
825 &attr->offset, &attr->offset_type,
826 &mask, &value, &next_pos) != 4)
827 return -EINVAL;
828
829 attr->mask = cpu_to_be32(mask);
830 attr->val = cpu_to_be32(value);
831 if (mask)
832 filter.num_attrs++;
833
834 pos += next_pos;
835 }
836
837 mutex_lock(&mvm->mutex);
838 memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
839 &filter, sizeof(filter));
840
841 /* send updated bcast filtering configuration */
842 if (mvm->dbgfs_bcast_filtering.override &&
843 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
844 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
845 sizeof(cmd), &cmd);
846 mutex_unlock(&mvm->mutex);
847
848 return err ?: count;
849}
850
851static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
852 char __user *user_buf,
853 size_t count, loff_t *ppos)
854{
855 struct iwl_mvm *mvm = file->private_data;
856 struct iwl_bcast_filter_cmd cmd;
857 char *buf;
858 int bufsz = 1024;
859 int i, pos = 0;
860 ssize_t ret;
861
862 buf = kzalloc(bufsz, GFP_KERNEL);
863 if (!buf)
864 return -ENOMEM;
865
866 mutex_lock(&mvm->mutex);
867 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
868 ADD_TEXT("None\n");
869 mutex_unlock(&mvm->mutex);
870 goto out;
871 }
872 mutex_unlock(&mvm->mutex);
873
874 for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
875 const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
876
877 ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
878 i, mac->default_discard, mac->attached_filters);
879 }
880out:
881 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
882 kfree(buf);
883 return ret;
884}
885
886static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
887 char *buf, size_t count,
888 loff_t *ppos)
889{
890 struct iwl_bcast_filter_cmd cmd;
891 struct iwl_fw_bcast_mac mac = {};
892 u32 mac_id, attached_filters;
893 int err = 0;
894
895 if (!mvm->bcast_filters)
896 return -ENOENT;
897
898 if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
899 &attached_filters) != 3)
900 return -EINVAL;
901
902 if (mac_id >= ARRAY_SIZE(cmd.macs) ||
903 mac.default_discard > 1 ||
904 attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
905 return -EINVAL;
906
907 mac.attached_filters = cpu_to_le16(attached_filters);
908
909 mutex_lock(&mvm->mutex);
910 memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
911 &mac, sizeof(mac));
912
913 /* send updated bcast filtering configuration */
914 if (mvm->dbgfs_bcast_filtering.override &&
915 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
916 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
917 sizeof(cmd), &cmd);
918 mutex_unlock(&mvm->mutex);
919
920 return err ?: count;
921}
922#endif
923
603#ifdef CONFIG_PM_SLEEP 924#ifdef CONFIG_PM_SLEEP
604static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf, 925static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
605 size_t count, loff_t *ppos) 926 size_t count, loff_t *ppos)
@@ -658,15 +979,117 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
658} 979}
659#endif 980#endif
660 981
982#define PRINT_MVM_REF(ref) do { \
983 if (test_bit(ref, mvm->ref_bitmap)) \
984 pos += scnprintf(buf + pos, bufsz - pos, \
985 "\t(0x%lx) %s\n", \
986 BIT(ref), #ref); \
987} while (0)
988
989static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
990 char __user *user_buf,
991 size_t count, loff_t *ppos)
992{
993 struct iwl_mvm *mvm = file->private_data;
994 int pos = 0;
995 char buf[256];
996 const size_t bufsz = sizeof(buf);
997
998 pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
999 mvm->ref_bitmap[0]);
1000
1001 PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
1002 PRINT_MVM_REF(IWL_MVM_REF_SCAN);
1003 PRINT_MVM_REF(IWL_MVM_REF_ROC);
1004 PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
1005 PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
1006 PRINT_MVM_REF(IWL_MVM_REF_USER);
1007
1008 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1009}
1010
1011static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
1012 size_t count, loff_t *ppos)
1013{
1014 unsigned long value;
1015 int ret;
1016 bool taken;
1017
1018 ret = kstrtoul(buf, 10, &value);
1019 if (ret < 0)
1020 return ret;
1021
1022 mutex_lock(&mvm->mutex);
1023
1024 taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
1025 if (value == 1 && !taken)
1026 iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
1027 else if (value == 0 && taken)
1028 iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
1029 else
1030 ret = -EINVAL;
1031
1032 mutex_unlock(&mvm->mutex);
1033
1034 if (ret < 0)
1035 return ret;
1036 return count;
1037}
1038
661#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 1039#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
662 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) 1040 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
663#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ 1041#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
664 _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) 1042 _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
665#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \ 1043#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
666 if (!debugfs_create_file(#name, mode, parent, mvm, \ 1044 if (!debugfs_create_file(alias, mode, parent, mvm, \
667 &iwl_dbgfs_##name##_ops)) \ 1045 &iwl_dbgfs_##name##_ops)) \
668 goto err; \ 1046 goto err; \
669 } while (0) 1047 } while (0)
1048#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
1049 MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
1050
1051static ssize_t
1052iwl_dbgfs_prph_reg_read(struct file *file,
1053 char __user *user_buf,
1054 size_t count, loff_t *ppos)
1055{
1056 struct iwl_mvm *mvm = file->private_data;
1057 int pos = 0;
1058 char buf[32];
1059 const size_t bufsz = sizeof(buf);
1060
1061 if (!mvm->dbgfs_prph_reg_addr)
1062 return -EINVAL;
1063
1064 pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
1065 mvm->dbgfs_prph_reg_addr,
1066 iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
1067
1068 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1069}
1070
1071static ssize_t
1072iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
1073 size_t count, loff_t *ppos)
1074{
1075 u8 args;
1076 u32 value;
1077
1078 args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
1079 /* if we only want to set the reg address - nothing more to do */
1080 if (args == 1)
1081 goto out;
1082
1083 /* otherwise, make sure we have both address and value */
1084 if (args != 2)
1085 return -EINVAL;
1086
1087 iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
1088out:
1089 return count;
1090}
1091
1092MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
670 1093
671/* Device wide debugfs entries */ 1094/* Device wide debugfs entries */
672MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); 1095MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@@ -677,9 +1100,23 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
677MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); 1100MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
678MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); 1101MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
679MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); 1102MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
1103MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
680MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); 1104MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
681MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); 1105MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
1106MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
682MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); 1107MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
1108MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
1109
1110static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
1111 .open = iwl_dbgfs_fw_error_dump_open,
1112 .read = iwl_dbgfs_fw_error_dump_read,
1113 .release = iwl_dbgfs_fw_error_dump_release,
1114};
1115
1116#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1117MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
1118MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
1119#endif
683 1120
684#ifdef CONFIG_PM_SLEEP 1121#ifdef CONFIG_PM_SLEEP
685MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); 1122MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@@ -687,24 +1124,52 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
687 1124
688int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) 1125int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
689{ 1126{
1127 struct dentry *bcast_dir __maybe_unused;
690 char buf[100]; 1128 char buf[100];
691 1129
1130 spin_lock_init(&mvm->drv_stats_lock);
1131
692 mvm->debugfs_dir = dbgfs_dir; 1132 mvm->debugfs_dir = dbgfs_dir;
693 1133
694 MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); 1134 MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
695 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); 1135 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
696 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); 1136 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
697 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); 1137 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
1138 MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
698 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); 1139 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
699 MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); 1140 MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
700 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD) 1141 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
701 MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 1142 MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
702 S_IRUSR | S_IWUSR); 1143 S_IRUSR | S_IWUSR);
703 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); 1144 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
1145 MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
704 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); 1146 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
705 MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR); 1147 MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
1148 MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
706 MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 1149 MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
707 S_IWUSR | S_IRUSR); 1150 S_IWUSR | S_IRUSR);
1151 MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
1152 MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1153
1154#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1155 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
1156 bcast_dir = debugfs_create_dir("bcast_filtering",
1157 mvm->debugfs_dir);
1158 if (!bcast_dir)
1159 goto err;
1160
1161 if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
1162 bcast_dir,
1163 &mvm->dbgfs_bcast_filtering.override))
1164 goto err;
1165
1166 MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
1167 bcast_dir, S_IWUSR | S_IRUSR);
1168 MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
1169 bcast_dir, S_IWUSR | S_IRUSR);
1170 }
1171#endif
1172
708#ifdef CONFIG_PM_SLEEP 1173#ifdef CONFIG_PM_SLEEP
709 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); 1174 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
710 MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR); 1175 MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 1b4e54d416b0..21877e5966a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -70,37 +70,28 @@
70 70
71/** 71/**
72 * enum iwl_bt_coex_flags - flags for BT_COEX command 72 * enum iwl_bt_coex_flags - flags for BT_COEX command
73 * @BT_CH_PRIMARY_EN:
74 * @BT_CH_SECONDARY_EN:
75 * @BT_NOTIF_COEX_OFF:
76 * @BT_COEX_MODE_POS: 73 * @BT_COEX_MODE_POS:
77 * @BT_COEX_MODE_MSK: 74 * @BT_COEX_MODE_MSK:
78 * @BT_COEX_DISABLE: 75 * @BT_COEX_DISABLE:
79 * @BT_COEX_2W: 76 * @BT_COEX_2W:
80 * @BT_COEX_3W: 77 * @BT_COEX_3W:
81 * @BT_COEX_NW: 78 * @BT_COEX_NW:
82 * @BT_USE_DEFAULTS: 79 * @BT_COEX_SYNC2SCO:
83 * @BT_SYNC_2_BT_DISABLE: 80 * @BT_COEX_CORUNNING:
84 * @BT_COEX_CORUNNING_TBL_EN: 81 * @BT_COEX_MPLUT:
85 * 82 *
86 * The COEX_MODE must be set for each command. Even if it is not changed. 83 * The COEX_MODE must be set for each command. Even if it is not changed.
87 */ 84 */
88enum iwl_bt_coex_flags { 85enum iwl_bt_coex_flags {
89 BT_CH_PRIMARY_EN = BIT(0),
90 BT_CH_SECONDARY_EN = BIT(1),
91 BT_NOTIF_COEX_OFF = BIT(2),
92 BT_COEX_MODE_POS = 3, 86 BT_COEX_MODE_POS = 3,
93 BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS, 87 BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
94 BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS, 88 BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
95 BT_COEX_2W = 0x1 << BT_COEX_MODE_POS, 89 BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
96 BT_COEX_3W = 0x2 << BT_COEX_MODE_POS, 90 BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
97 BT_COEX_NW = 0x3 << BT_COEX_MODE_POS, 91 BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
98 BT_USE_DEFAULTS = BIT(6), 92 BT_COEX_SYNC2SCO = BIT(7),
99 BT_SYNC_2_BT_DISABLE = BIT(7), 93 BT_COEX_CORUNNING = BIT(8),
100 BT_COEX_CORUNNING_TBL_EN = BIT(8), 94 BT_COEX_MPLUT = BIT(9),
101 BT_COEX_MPLUT_TBL_EN = BIT(9),
102 /* Bit 10 is reserved */
103 BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
104}; 95};
105 96
106/* 97/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 8415ff312d0e..10fcc1a79ebd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -231,11 +231,15 @@ enum iwl_wowlan_wakeup_filters {
231 IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), 231 IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
232 IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), 232 IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
233 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), 233 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
234 /* BIT(11) reserved */ 234 IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
235 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), 235 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
236 IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
237 IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
238 IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
239 IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
236}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ 240}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
237 241
238struct iwl_wowlan_config_cmd { 242struct iwl_wowlan_config_cmd_v2 {
239 __le32 wakeup_filter; 243 __le32 wakeup_filter;
240 __le16 non_qos_seq; 244 __le16 non_qos_seq;
241 __le16 qos_seq[8]; 245 __le16 qos_seq[8];
@@ -243,6 +247,12 @@ struct iwl_wowlan_config_cmd {
243 u8 is_11n_connection; 247 u8 is_11n_connection;
244} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */ 248} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
245 249
250struct iwl_wowlan_config_cmd_v3 {
251 struct iwl_wowlan_config_cmd_v2 common;
252 u8 offloading_tid;
253 u8 reserved[3];
254} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
255
246/* 256/*
247 * WOWLAN_TSC_RSC_PARAMS 257 * WOWLAN_TSC_RSC_PARAMS
248 */ 258 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 884c08725308..cbbcd8e284e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
301 301
302/* Beacon filtering and beacon abort */ 302/* Beacon filtering and beacon abort */
303#define IWL_BF_ENERGY_DELTA_DEFAULT 5 303#define IWL_BF_ENERGY_DELTA_DEFAULT 5
304#define IWL_BF_ENERGY_DELTA_D0I3 20
304#define IWL_BF_ENERGY_DELTA_MAX 255 305#define IWL_BF_ENERGY_DELTA_MAX 255
305#define IWL_BF_ENERGY_DELTA_MIN 0 306#define IWL_BF_ENERGY_DELTA_MIN 0
306 307
307#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 308#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
309#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
308#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 310#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
309#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 311#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
310 312
311#define IWL_BF_ROAMING_STATE_DEFAULT 72 313#define IWL_BF_ROAMING_STATE_DEFAULT 72
314#define IWL_BF_ROAMING_STATE_D0I3 72
312#define IWL_BF_ROAMING_STATE_MAX 255 315#define IWL_BF_ROAMING_STATE_MAX 255
313#define IWL_BF_ROAMING_STATE_MIN 0 316#define IWL_BF_ROAMING_STATE_MIN 0
314 317
315#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 318#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
319#define IWL_BF_TEMP_THRESHOLD_D0I3 112
316#define IWL_BF_TEMP_THRESHOLD_MAX 255 320#define IWL_BF_TEMP_THRESHOLD_MAX 255
317#define IWL_BF_TEMP_THRESHOLD_MIN 0 321#define IWL_BF_TEMP_THRESHOLD_MIN 0
318 322
319#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 323#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
324#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
320#define IWL_BF_TEMP_FAST_FILTER_MAX 255 325#define IWL_BF_TEMP_FAST_FILTER_MAX 255
321#define IWL_BF_TEMP_FAST_FILTER_MIN 0 326#define IWL_BF_TEMP_FAST_FILTER_MIN 0
322 327
323#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 328#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
329#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
324#define IWL_BF_TEMP_SLOW_FILTER_MAX 255 330#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
325#define IWL_BF_TEMP_SLOW_FILTER_MIN 0 331#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
326 332
327#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 333#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
328 334
329#define IWL_BF_DEBUG_FLAG_DEFAULT 0 335#define IWL_BF_DEBUG_FLAG_DEFAULT 0
336#define IWL_BF_DEBUG_FLAG_D0I3 0
330 337
331#define IWL_BF_ESCAPE_TIMER_DEFAULT 50 338#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
339#define IWL_BF_ESCAPE_TIMER_D0I3 1024
332#define IWL_BF_ESCAPE_TIMER_MAX 1024 340#define IWL_BF_ESCAPE_TIMER_MAX 1024
333#define IWL_BF_ESCAPE_TIMER_MIN 0 341#define IWL_BF_ESCAPE_TIMER_MIN 0
334 342
335#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 343#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
344#define IWL_BA_ESCAPE_TIMER_D0I3 6
336#define IWL_BA_ESCAPE_TIMER_D3 9 345#define IWL_BA_ESCAPE_TIMER_D3 9
337#define IWL_BA_ESCAPE_TIMER_MAX 1024 346#define IWL_BA_ESCAPE_TIMER_MAX 1024
338#define IWL_BA_ESCAPE_TIMER_MIN 0 347#define IWL_BA_ESCAPE_TIMER_MIN 0
339 348
340#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 349#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
341 350
342#define IWL_BF_CMD_CONFIG_DEFAULTS \ 351#define IWL_BF_CMD_CONFIG(mode) \
343 .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \ 352 .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
344 .bf_roaming_energy_delta = \ 353 .bf_roaming_energy_delta = \
345 cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \ 354 cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
346 .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \ 355 .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
347 .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \ 356 .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
348 .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \ 357 .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
349 .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \ 358 .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
350 .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \ 359 .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
351 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \ 360 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
352 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT) 361 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
353 362
363#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
364#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
354#endif 365#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 85057219cc43..39148b5bb332 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -257,7 +257,8 @@ enum {
257 257
258/* Bit 17-18: (0) SS, (1) SS*2 */ 258/* Bit 17-18: (0) SS, (1) SS*2 */
259#define RATE_MCS_STBC_POS 17 259#define RATE_MCS_STBC_POS 17
260#define RATE_MCS_STBC_MSK (1 << RATE_MCS_STBC_POS) 260#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS)
261#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS)
261 262
262/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ 263/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
263#define RATE_MCS_BF_POS 19 264#define RATE_MCS_BF_POS 19
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 1b60fdff6a56..d63647867262 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
199 * @STA_SLEEP_STATE_AWAKE: 199 * @STA_SLEEP_STATE_AWAKE:
200 * @STA_SLEEP_STATE_PS_POLL: 200 * @STA_SLEEP_STATE_PS_POLL:
201 * @STA_SLEEP_STATE_UAPSD: 201 * @STA_SLEEP_STATE_UAPSD:
202 * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
203 * (last) released frame
202 */ 204 */
203enum iwl_sta_sleep_flag { 205enum iwl_sta_sleep_flag {
204 STA_SLEEP_STATE_AWAKE = 0, 206 STA_SLEEP_STATE_AWAKE = 0,
205 STA_SLEEP_STATE_PS_POLL = BIT(0), 207 STA_SLEEP_STATE_PS_POLL = BIT(0),
206 STA_SLEEP_STATE_UAPSD = BIT(1), 208 STA_SLEEP_STATE_UAPSD = BIT(1),
209 STA_SLEEP_STATE_MOREDATA = BIT(2),
207}; 210};
208 211
209/* STA ID and color bits definitions */ 212/* STA ID and color bits definitions */
@@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
318} __packed; /* ADD_STA_CMD_API_S_VER_5 */ 321} __packed; /* ADD_STA_CMD_API_S_VER_5 */
319 322
320/** 323/**
321 * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station 324 * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
322 * VER_6 of this command is quite similar to VER_5 except 325 * VER_7 of this command is quite similar to VER_5 except
323 * exclusion of all fields related to the security key installation. 326 * exclusion of all fields related to the security key installation.
327 * It only differs from VER_6 by the "awake_acs" field that is
328 * reserved and ignored in VER_6.
324 */ 329 */
325struct iwl_mvm_add_sta_cmd_v6 { 330struct iwl_mvm_add_sta_cmd_v7 {
326 u8 add_modify; 331 u8 add_modify;
327 u8 reserved1; 332 u8 awake_acs;
328 __le16 tid_disable_tx; 333 __le16 tid_disable_tx;
329 __le32 mac_id_n_color; 334 __le32 mac_id_n_color;
330 u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ 335 u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
342 __le16 assoc_id; 347 __le16 assoc_id;
343 __le16 beamform_flags; 348 __le16 beamform_flags;
344 __le32 tfd_queue_msk; 349 __le32 tfd_queue_msk;
345} __packed; /* ADD_STA_CMD_API_S_VER_6 */ 350} __packed; /* ADD_STA_CMD_API_S_VER_7 */
346 351
347/** 352/**
348 * struct iwl_mvm_add_sta_key_cmd - add/modify sta key 353 * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
432 struct iwl_mvm_wep_key wep_key[0]; 437 struct iwl_mvm_wep_key wep_key[0];
433} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ 438} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
434 439
440/**
441 * struct iwl_mvm_eosp_notification - EOSP notification from firmware
442 * @remain_frame_count: # of frames remaining, non-zero if SP was cut
443 * short by GO absence
444 * @sta_id: station ID
445 */
446struct iwl_mvm_eosp_notification {
447 __le32 remain_frame_count;
448 __le32 sta_id;
449} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
435 450
436#endif /* __fw_api_sta_h__ */ 451#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index b674c2a2b51c..8e122f3a7a74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -76,6 +76,8 @@
76 * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence 76 * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
77 * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence 77 * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
78 * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) 78 * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
79 * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
80 * on old firmwares).
79 * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame 81 * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
80 * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. 82 * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
81 * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command 83 * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
@@ -107,6 +109,7 @@ enum iwl_tx_flags {
107 TX_CMD_FLG_VHT_NDPA = BIT(8), 109 TX_CMD_FLG_VHT_NDPA = BIT(8),
108 TX_CMD_FLG_HT_NDPA = BIT(9), 110 TX_CMD_FLG_HT_NDPA = BIT(9),
109 TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), 111 TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
112 TX_CMD_FLG_BT_PRIO_POS = 11,
110 TX_CMD_FLG_BT_DIS = BIT(12), 113 TX_CMD_FLG_BT_DIS = BIT(12),
111 TX_CMD_FLG_SEQ_CTL = BIT(13), 114 TX_CMD_FLG_SEQ_CTL = BIT(13),
112 TX_CMD_FLG_MORE_FRAG = BIT(14), 115 TX_CMD_FLG_MORE_FRAG = BIT(14),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 989d7dbdca6c..6e75b52588de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -70,7 +70,7 @@
70#include "fw-api-mac.h" 70#include "fw-api-mac.h"
71#include "fw-api-power.h" 71#include "fw-api-power.h"
72#include "fw-api-d3.h" 72#include "fw-api-d3.h"
73#include "fw-api-bt-coex.h" 73#include "fw-api-coex.h"
74 74
75/* maximal number of Tx queues in any platform */ 75/* maximal number of Tx queues in any platform */
76#define IWL_MVM_MAX_QUEUES 20 76#define IWL_MVM_MAX_QUEUES 20
@@ -95,6 +95,7 @@ enum {
95 /* PHY context commands */ 95 /* PHY context commands */
96 PHY_CONTEXT_CMD = 0x8, 96 PHY_CONTEXT_CMD = 0x8,
97 DBG_CFG = 0x9, 97 DBG_CFG = 0x9,
98 ANTENNA_COUPLING_NOTIFICATION = 0xa,
98 99
99 /* station table */ 100 /* station table */
100 ADD_STA_KEY = 0x17, 101 ADD_STA_KEY = 0x17,
@@ -163,6 +164,7 @@ enum {
163 TX_ANT_CONFIGURATION_CMD = 0x98, 164 TX_ANT_CONFIGURATION_CMD = 0x98,
164 BT_CONFIG = 0x9b, 165 BT_CONFIG = 0x9b,
165 STATISTICS_NOTIFICATION = 0x9d, 166 STATISTICS_NOTIFICATION = 0x9d,
167 EOSP_NOTIFICATION = 0x9e,
166 REDUCE_TX_POWER_CMD = 0x9f, 168 REDUCE_TX_POWER_CMD = 0x9f,
167 169
168 /* RF-KILL commands and notifications */ 170 /* RF-KILL commands and notifications */
@@ -190,6 +192,7 @@ enum {
190 REPLY_DEBUG_CMD = 0xf0, 192 REPLY_DEBUG_CMD = 0xf0,
191 DEBUG_LOG_MSG = 0xf7, 193 DEBUG_LOG_MSG = 0xf7,
192 194
195 BCAST_FILTER_CMD = 0xcf,
193 MCAST_FILTER_CMD = 0xd0, 196 MCAST_FILTER_CMD = 0xd0,
194 197
195 /* D3 commands/notifications */ 198 /* D3 commands/notifications */
@@ -197,6 +200,7 @@ enum {
197 PROT_OFFLOAD_CONFIG_CMD = 0xd4, 200 PROT_OFFLOAD_CONFIG_CMD = 0xd4,
198 OFFLOADS_QUERY_CMD = 0xd5, 201 OFFLOADS_QUERY_CMD = 0xd5,
199 REMOTE_WAKE_CONFIG_CMD = 0xd6, 202 REMOTE_WAKE_CONFIG_CMD = 0xd6,
203 D0I3_END_CMD = 0xed,
200 204
201 /* for WoWLAN in particular */ 205 /* for WoWLAN in particular */
202 WOWLAN_PATTERNS = 0xe0, 206 WOWLAN_PATTERNS = 0xe0,
@@ -313,14 +317,12 @@ enum {
313 317
314/* Section types for NVM_ACCESS_CMD */ 318/* Section types for NVM_ACCESS_CMD */
315enum { 319enum {
316 NVM_SECTION_TYPE_HW = 0, 320 NVM_SECTION_TYPE_SW = 1,
317 NVM_SECTION_TYPE_SW, 321 NVM_SECTION_TYPE_REGULATORY = 3,
318 NVM_SECTION_TYPE_PAPD, 322 NVM_SECTION_TYPE_CALIBRATION = 4,
319 NVM_SECTION_TYPE_BT, 323 NVM_SECTION_TYPE_PRODUCTION = 5,
320 NVM_SECTION_TYPE_CALIBRATION, 324 NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
321 NVM_SECTION_TYPE_PRODUCTION, 325 NVM_MAX_NUM_SECTIONS = 12,
322 NVM_SECTION_TYPE_POST_FCS_CALIB,
323 NVM_NUM_OF_SECTIONS,
324}; 326};
325 327
326/** 328/**
@@ -412,6 +414,35 @@ struct mvm_alive_resp {
412 __le32 scd_base_ptr; /* SRAM address for SCD */ 414 __le32 scd_base_ptr; /* SRAM address for SCD */
413} __packed; /* ALIVE_RES_API_S_VER_1 */ 415} __packed; /* ALIVE_RES_API_S_VER_1 */
414 416
417struct mvm_alive_resp_ver2 {
418 __le16 status;
419 __le16 flags;
420 u8 ucode_minor;
421 u8 ucode_major;
422 __le16 id;
423 u8 api_minor;
424 u8 api_major;
425 u8 ver_subtype;
426 u8 ver_type;
427 u8 mac;
428 u8 opt;
429 __le16 reserved2;
430 __le32 timestamp;
431 __le32 error_event_table_ptr; /* SRAM address for error log */
432 __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
433 __le32 cpu_register_ptr;
434 __le32 dbgm_config_ptr;
435 __le32 alive_counter_ptr;
436 __le32 scd_base_ptr; /* SRAM address for SCD */
437 __le32 st_fwrd_addr; /* pointer to Store and forward */
438 __le32 st_fwrd_size;
439 u8 umac_minor; /* UMAC version: minor */
440 u8 umac_major; /* UMAC version: major */
441 __le16 umac_id; /* UMAC version: id */
442 __le32 error_info_addr; /* SRAM address for UMAC error log */
443 __le32 dbg_print_buff_addr;
444} __packed; /* ALIVE_RES_API_S_VER_2 */
445
415/* Error response/notification */ 446/* Error response/notification */
416enum { 447enum {
417 FW_ERR_UNKNOWN_CMD = 0x0, 448 FW_ERR_UNKNOWN_CMD = 0x0,
@@ -682,6 +713,7 @@ enum {
682 TE_V2_NOTIF_HOST_FRAG_END = BIT(5), 713 TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
683 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), 714 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
684 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), 715 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
716 T2_V2_START_IMMEDIATELY = BIT(11),
685 717
686 TE_V2_NOTIF_MSK = 0xff, 718 TE_V2_NOTIF_MSK = 0xff,
687 719
@@ -1159,6 +1191,90 @@ struct iwl_mcast_filter_cmd {
1159 u8 addr_list[0]; 1191 u8 addr_list[0];
1160} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ 1192} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
1161 1193
1194#define MAX_BCAST_FILTERS 8
1195#define MAX_BCAST_FILTER_ATTRS 2
1196
1197/**
1198 * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
1199 * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
1200 * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
1201 * start of ip payload).
1202 */
1203enum iwl_mvm_bcast_filter_attr_offset {
1204 BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
1205 BCAST_FILTER_OFFSET_IP_END = 1,
1206};
1207
1208/**
1209 * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
1210 * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
1211 * @offset: starting offset of this pattern.
1212 * @val: value to match - big endian (MSB is the first
1213 * byte to match from offset pos).
1214 * @mask: mask to match (big endian).
1215 */
1216struct iwl_fw_bcast_filter_attr {
1217 u8 offset_type;
1218 u8 offset;
1219 __le16 reserved1;
1220 __be32 val;
1221 __be32 mask;
1222} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
1223
1224/**
1225 * enum iwl_mvm_bcast_filter_frame_type - filter frame type
1226 * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
1227 * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
1228 */
1229enum iwl_mvm_bcast_filter_frame_type {
1230 BCAST_FILTER_FRAME_TYPE_ALL = 0,
1231 BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
1232};
1233
1234/**
1235 * struct iwl_fw_bcast_filter - broadcast filter
1236 * @discard: discard frame (1) or let it pass (0).
1237 * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
1238 * @num_attrs: number of valid attributes in this filter.
1239 * @attrs: attributes of this filter. a filter is considered matched
1240 * only when all its attributes are matched (i.e. AND relationship)
1241 */
1242struct iwl_fw_bcast_filter {
1243 u8 discard;
1244 u8 frame_type;
1245 u8 num_attrs;
1246 u8 reserved1;
1247 struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
1248} __packed; /* BCAST_FILTER_S_VER_1 */
1249
1250/**
1251 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
1252 * @default_discard: default action for this mac (discard (1) / pass (0)).
1253 * @attached_filters: bitmap of relevant filters for this mac.
1254 */
1255struct iwl_fw_bcast_mac {
1256 u8 default_discard;
1257 u8 reserved1;
1258 __le16 attached_filters;
1259} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
1260
1261/**
1262 * struct iwl_bcast_filter_cmd - broadcast filtering configuration
1263 * @disable: enable (0) / disable (1)
1264 * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
1265 * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
1266 * @filters: broadcast filters
1267 * @macs: broadcast filtering configuration per-mac
1268 */
1269struct iwl_bcast_filter_cmd {
1270 u8 disable;
1271 u8 max_bcast_filters;
1272 u8 max_macs;
1273 u8 reserved1;
1274 struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
1275 struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
1276} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
1277
1162struct mvm_statistics_dbg { 1278struct mvm_statistics_dbg {
1163 __le32 burst_check; 1279 __le32 burst_check;
1164 __le32 burst_count; 1280 __le32 burst_count;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
new file mode 100644
index 000000000000..58c8941c0d95
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
@@ -0,0 +1,106 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_error_dump_h__
64#define __fw_error_dump_h__
65
66#include <linux/types.h>
67
68#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
69
70/**
71 * enum iwl_fw_error_dump_type - types of data in the dump file
72 * @IWL_FW_ERROR_DUMP_SRAM:
73 * @IWL_FW_ERROR_DUMP_REG:
74 */
75enum iwl_fw_error_dump_type {
76 IWL_FW_ERROR_DUMP_SRAM = 0,
77 IWL_FW_ERROR_DUMP_REG = 1,
78
79 IWL_FW_ERROR_DUMP_MAX,
80};
81
82/**
83 * struct iwl_fw_error_dump_data - data for one type
84 * @type: %enum iwl_fw_error_dump_type
85 * @len: the length starting from %data - must be a multiplier of 4.
86 * @data: the data itself padded to be a multiplier of 4.
87 */
88struct iwl_fw_error_dump_data {
89 __le32 type;
90 __le32 len;
91 __u8 data[];
92} __packed __aligned(4);
93
94/**
95 * struct iwl_fw_error_dump_file - the layout of the header of the file
96 * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
97 * @file_len: the length of all the file starting from %barker
98 * @data: array of %struct iwl_fw_error_dump_data
99 */
100struct iwl_fw_error_dump_file {
101 __le32 barker;
102 __le32 file_len;
103 u8 data[0];
104} __packed __aligned(4);
105
106#endif /* __fw_error_dump_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c03d39541f9e..7ce20062f32d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -110,18 +110,48 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
110 container_of(notif_wait, struct iwl_mvm, notif_wait); 110 container_of(notif_wait, struct iwl_mvm, notif_wait);
111 struct iwl_mvm_alive_data *alive_data = data; 111 struct iwl_mvm_alive_data *alive_data = data;
112 struct mvm_alive_resp *palive; 112 struct mvm_alive_resp *palive;
113 113 struct mvm_alive_resp_ver2 *palive2;
114 palive = (void *)pkt->data; 114
115 115 if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
116 mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr); 116 palive = (void *)pkt->data;
117 mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr); 117
118 alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); 118 mvm->support_umac_log = false;
119 119 mvm->error_event_table =
120 alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK; 120 le32_to_cpu(palive->error_event_table_ptr);
121 IWL_DEBUG_FW(mvm, 121 mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
122 "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 122 alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
123 le16_to_cpu(palive->status), palive->ver_type, 123
124 palive->ver_subtype, palive->flags); 124 alive_data->valid = le16_to_cpu(palive->status) ==
125 IWL_ALIVE_STATUS_OK;
126 IWL_DEBUG_FW(mvm,
127 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
128 le16_to_cpu(palive->status), palive->ver_type,
129 palive->ver_subtype, palive->flags);
130 } else {
131 palive2 = (void *)pkt->data;
132
133 mvm->error_event_table =
134 le32_to_cpu(palive2->error_event_table_ptr);
135 mvm->log_event_table =
136 le32_to_cpu(palive2->log_event_table_ptr);
137 alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
138 mvm->umac_error_event_table =
139 le32_to_cpu(palive2->error_info_addr);
140
141 alive_data->valid = le16_to_cpu(palive2->status) ==
142 IWL_ALIVE_STATUS_OK;
143 if (mvm->umac_error_event_table)
144 mvm->support_umac_log = true;
145
146 IWL_DEBUG_FW(mvm,
147 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
148 le16_to_cpu(palive2->status), palive2->ver_type,
149 palive2->ver_subtype, palive2->flags);
150
151 IWL_DEBUG_FW(mvm,
152 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
153 palive2->umac_major, palive2->umac_minor);
154 }
125 155
126 return true; 156 return true;
127} 157}
@@ -292,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
292 } 322 }
293 323
294 /* Send TX valid antennas before triggering calibrations */ 324 /* Send TX valid antennas before triggering calibrations */
295 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw)); 325 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
296 if (ret) 326 if (ret)
297 goto error; 327 goto error;
298 328
@@ -328,8 +358,6 @@ out:
328 GFP_KERNEL); 358 GFP_KERNEL);
329 if (!mvm->nvm_data) 359 if (!mvm->nvm_data)
330 return -ENOMEM; 360 return -ENOMEM;
331 mvm->nvm_data->valid_rx_ant = 1;
332 mvm->nvm_data->valid_tx_ant = 1;
333 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; 361 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
334 mvm->nvm_data->bands[0].n_channels = 1; 362 mvm->nvm_data->bands[0].n_channels = 1;
335 mvm->nvm_data->bands[0].n_bitrates = 1; 363 mvm->nvm_data->bands[0].n_bitrates = 1;
@@ -341,8 +369,6 @@ out:
341 return ret; 369 return ret;
342} 370}
343 371
344#define UCODE_CALIB_TIMEOUT (2*HZ)
345
346int iwl_mvm_up(struct iwl_mvm *mvm) 372int iwl_mvm_up(struct iwl_mvm *mvm)
347{ 373{
348 int ret, i; 374 int ret, i;
@@ -394,7 +420,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
394 if (ret) 420 if (ret)
395 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); 421 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
396 422
397 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw)); 423 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
398 if (ret) 424 if (ret)
399 goto error; 425 goto error;
400 426
@@ -439,10 +465,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
439 goto error; 465 goto error;
440 } 466 }
441 467
442 ret = iwl_mvm_power_update_device_mode(mvm); 468 /* Initialize tx backoffs to the minimal possible */
469 iwl_mvm_tt_tx_backoff(mvm, 0);
470
471 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
472 ret = iwl_power_legacy_set_cam_mode(mvm);
473 if (ret)
474 goto error;
475 }
476
477 ret = iwl_mvm_power_update_device(mvm);
443 if (ret) 478 if (ret)
444 goto error; 479 goto error;
445 480
481 /* allow FW/transport low power modes if not during restart */
482 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
483 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
484
446 IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); 485 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
447 return 0; 486 return 0;
448 error: 487 error:
@@ -466,7 +505,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
466 goto error; 505 goto error;
467 } 506 }
468 507
469 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw)); 508 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
470 if (ret) 509 if (ret)
471 goto error; 510 goto error;
472 511
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 6b4ea6bf8ffe..e3b3cf4dbd77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -94,6 +94,8 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
94 int ret; 94 int ret;
95 95
96 switch (mode) { 96 switch (mode) {
97 case IWL_LED_BLINK:
98 IWL_ERR(mvm, "Blink led mode not supported, used default\n");
97 case IWL_LED_DEFAULT: 99 case IWL_LED_DEFAULT:
98 case IWL_LED_RF_STATE: 100 case IWL_LED_RF_STATE:
99 mode = IWL_LED_RF_STATE; 101 mode = IWL_LED_RF_STATE;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index ba723d50939a..9ccec10bba16 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
90{ 90{
91 struct iwl_mvm_mac_iface_iterator_data *data = _data; 91 struct iwl_mvm_mac_iface_iterator_data *data = _data;
92 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 92 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
93 u16 min_bi;
93 94
94 /* Skip the interface for which we are trying to assign a tsf_id */ 95 /* Skip the interface for which we are trying to assign a tsf_id */
95 if (vif == data->vif) 96 if (vif == data->vif)
@@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
114 switch (data->vif->type) { 115 switch (data->vif->type) {
115 case NL80211_IFTYPE_STATION: 116 case NL80211_IFTYPE_STATION:
116 /* 117 /*
117 * The new interface is client, so if the existing one 118 * The new interface is a client, so if the one we're iterating
118 * we're iterating is an AP, and both interfaces have the 119 * is an AP, and the beacon interval of the AP is a multiple or
119 * same beacon interval, the same TSF should be used to 120 * divisor of the beacon interval of the client, the same TSF
120 * avoid drift between the new client and existing AP, 121 * should be used to avoid drift between the new client and
121 * the existing AP will get drift updates from the new 122 * existing AP. The existing AP will get drift updates from the
122 * client context in this case 123 * new client context in this case.
123 */ 124 */
124 if (vif->type == NL80211_IFTYPE_AP) { 125 if (vif->type != NL80211_IFTYPE_AP ||
125 if (data->preferred_tsf == NUM_TSF_IDS && 126 data->preferred_tsf != NUM_TSF_IDS ||
126 test_bit(mvmvif->tsf_id, data->available_tsf_ids) && 127 !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
127 (vif->bss_conf.beacon_int == 128 break;
128 data->vif->bss_conf.beacon_int)) { 129
129 data->preferred_tsf = mvmvif->tsf_id; 130 min_bi = min(data->vif->bss_conf.beacon_int,
130 return; 131 vif->bss_conf.beacon_int);
131 } 132
133 if (!min_bi)
134 break;
135
136 if ((data->vif->bss_conf.beacon_int -
137 vif->bss_conf.beacon_int) % min_bi == 0) {
138 data->preferred_tsf = mvmvif->tsf_id;
139 return;
132 } 140 }
133 break; 141 break;
142
134 case NL80211_IFTYPE_AP: 143 case NL80211_IFTYPE_AP:
135 /* 144 /*
136 * The new interface is AP/GO, so in case both interfaces 145 * The new interface is AP/GO, so if its beacon interval is a
137 * have the same beacon interval, it should get drift 146 * multiple or a divisor of the beacon interval of an existing
138 * updates from an existing client or use the same 147 * interface, it should get drift updates from an existing
139 * TSF as an existing GO. There's no drift between 148 * client or use the same TSF as an existing GO. There's no
140 * TSFs internally but if they used different TSFs 149 * drift between TSFs internally but if they used different
141 * then a new client MAC could update one of them 150 * TSFs then a new client MAC could update one of them and
142 * and cause drift that way. 151 * cause drift that way.
143 */ 152 */
144 if (vif->type == NL80211_IFTYPE_STATION || 153 if ((vif->type != NL80211_IFTYPE_AP &&
145 vif->type == NL80211_IFTYPE_AP) { 154 vif->type != NL80211_IFTYPE_STATION) ||
146 if (data->preferred_tsf == NUM_TSF_IDS && 155 data->preferred_tsf != NUM_TSF_IDS ||
147 test_bit(mvmvif->tsf_id, data->available_tsf_ids) && 156 !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
148 (vif->bss_conf.beacon_int == 157 break;
149 data->vif->bss_conf.beacon_int)) { 158
150 data->preferred_tsf = mvmvif->tsf_id; 159 min_bi = min(data->vif->bss_conf.beacon_int,
151 return; 160 vif->bss_conf.beacon_int);
152 } 161
162 if (!min_bi)
163 break;
164
165 if ((data->vif->bss_conf.beacon_int -
166 vif->bss_conf.beacon_int) % min_bi == 0) {
167 data->preferred_tsf = mvmvif->tsf_id;
168 return;
153 } 169 }
154 break; 170 break;
155 default: 171 default:
@@ -936,7 +952,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
936 TX_CMD_FLG_TSF); 952 TX_CMD_FLG_TSF);
937 953
938 mvm->mgmt_last_antenna_idx = 954 mvm->mgmt_last_antenna_idx =
939 iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw), 955 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
940 mvm->mgmt_last_antenna_idx); 956 mvm->mgmt_last_antenna_idx);
941 957
942 beacon_cmd.tx.rate_n_flags = 958 beacon_cmd.tx.rate_n_flags =
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c35b8661b395..4dd9ff43b8b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -66,7 +66,9 @@
66#include <linux/netdevice.h> 66#include <linux/netdevice.h>
67#include <linux/etherdevice.h> 67#include <linux/etherdevice.h>
68#include <linux/ip.h> 68#include <linux/ip.h>
69#include <linux/if_arp.h>
69#include <net/mac80211.h> 70#include <net/mac80211.h>
71#include <net/ieee80211_radiotap.h>
70#include <net/tcp.h> 72#include <net/tcp.h>
71 73
72#include "iwl-op-mode.h" 74#include "iwl-op-mode.h"
@@ -128,6 +130,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
128}; 130};
129#endif 131#endif
130 132
133#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
134/*
135 * Use the reserved field to indicate magic values.
136 * these values will only be used internally by the driver,
137 * and won't make it to the fw (reserved will be 0).
138 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
139 * be the vif's ip address. in case there is not a single
140 * ip address (0, or more than 1), this attribute will
141 * be skipped.
142 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
143 * the LSB bytes of the vif's mac address
144 */
145enum {
146 BC_FILTER_MAGIC_NONE = 0,
147 BC_FILTER_MAGIC_IP,
148 BC_FILTER_MAGIC_MAC,
149};
150
151static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
152 {
153 /* arp */
154 .discard = 0,
155 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
156 .attrs = {
157 {
158 /* frame type - arp, hw type - ethernet */
159 .offset_type =
160 BCAST_FILTER_OFFSET_PAYLOAD_START,
161 .offset = sizeof(rfc1042_header),
162 .val = cpu_to_be32(0x08060001),
163 .mask = cpu_to_be32(0xffffffff),
164 },
165 {
166 /* arp dest ip */
167 .offset_type =
168 BCAST_FILTER_OFFSET_PAYLOAD_START,
169 .offset = sizeof(rfc1042_header) + 2 +
170 sizeof(struct arphdr) +
171 ETH_ALEN + sizeof(__be32) +
172 ETH_ALEN,
173 .mask = cpu_to_be32(0xffffffff),
174 /* mark it as special field */
175 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
176 },
177 },
178 },
179 {
180 /* dhcp offer bcast */
181 .discard = 0,
182 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
183 .attrs = {
184 {
185 /* udp dest port - 68 (bootp client)*/
186 .offset_type = BCAST_FILTER_OFFSET_IP_END,
187 .offset = offsetof(struct udphdr, dest),
188 .val = cpu_to_be32(0x00440000),
189 .mask = cpu_to_be32(0xffff0000),
190 },
191 {
192 /* dhcp - lsb bytes of client hw address */
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = 38,
195 .mask = cpu_to_be32(0xffffffff),
196 /* mark it as special field */
197 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
198 },
199 },
200 },
201 /* last filter must be empty */
202 {},
203};
204#endif
205
206void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
207{
208 if (!iwl_mvm_is_d0i3_supported(mvm))
209 return;
210
211 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
212 WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
213 iwl_trans_ref(mvm->trans);
214}
215
216void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
217{
218 if (!iwl_mvm_is_d0i3_supported(mvm))
219 return;
220
221 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
222 WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
223 iwl_trans_unref(mvm->trans);
224}
225
226static void
227iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
228{
229 int i;
230
231 if (!iwl_mvm_is_d0i3_supported(mvm))
232 return;
233
234 for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
235 if (ref == i)
236 continue;
237
238 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
239 clear_bit(i, mvm->ref_bitmap);
240 iwl_trans_unref(mvm->trans);
241 }
242}
243
131static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) 244static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
132{ 245{
133 int i; 246 int i;
@@ -168,6 +281,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
168 281
169 hw->queues = mvm->first_agg_queue; 282 hw->queues = mvm->first_agg_queue;
170 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 283 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
284 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
285 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
286 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
171 hw->rate_control_algorithm = "iwl-mvm-rs"; 287 hw->rate_control_algorithm = "iwl-mvm-rs";
172 288
173 /* 289 /*
@@ -179,7 +295,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
179 !iwlwifi_mod_params.sw_crypto) 295 !iwlwifi_mod_params.sw_crypto)
180 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 296 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
181 297
182 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { 298 if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
183 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; 299 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
184 hw->uapsd_queues = IWL_UAPSD_AC_INFO; 300 hw->uapsd_queues = IWL_UAPSD_AC_INFO;
185 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 301 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -203,6 +319,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
203 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 319 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
204 REGULATORY_DISABLE_BEACON_HINTS; 320 REGULATORY_DISABLE_BEACON_HINTS;
205 321
322 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
323 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
324
206 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 325 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
207 hw->wiphy->n_iface_combinations = 326 hw->wiphy->n_iface_combinations =
208 ARRAY_SIZE(iwl_mvm_iface_combinations); 327 ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -246,7 +365,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
246 else 365 else
247 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 366 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
248 367
249 if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { 368 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
250 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 369 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
251 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 370 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
252 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 371 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -256,8 +375,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
256 } 375 }
257 376
258 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 377 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
259 NL80211_FEATURE_P2P_GO_OPPPS | 378 NL80211_FEATURE_P2P_GO_OPPPS;
260 NL80211_FEATURE_LOW_PRIORITY_SCAN;
261 379
262 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 380 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
263 381
@@ -289,6 +407,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
289 } 407 }
290#endif 408#endif
291 409
410#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
411 /* assign default bcast filtering configuration */
412 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
413#endif
414
292 ret = iwl_mvm_leds_init(mvm); 415 ret = iwl_mvm_leds_init(mvm);
293 if (ret) 416 if (ret)
294 return ret; 417 return ret;
@@ -300,11 +423,55 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
300 return ret; 423 return ret;
301} 424}
302 425
426static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
427 struct ieee80211_sta *sta,
428 struct sk_buff *skb)
429{
430 struct iwl_mvm_sta *mvmsta;
431 bool defer = false;
432
433 /*
434 * double check the IN_D0I3 flag both before and after
435 * taking the spinlock, in order to prevent taking
436 * the spinlock when not needed.
437 */
438 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
439 return false;
440
441 spin_lock(&mvm->d0i3_tx_lock);
442 /*
443 * testing the flag again ensures the skb dequeue
444 * loop (on d0i3 exit) hasn't run yet.
445 */
446 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
447 goto out;
448
449 mvmsta = iwl_mvm_sta_from_mac80211(sta);
450 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
451 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
452 goto out;
453
454 __skb_queue_tail(&mvm->d0i3_tx, skb);
455 ieee80211_stop_queues(mvm->hw);
456
457 /* trigger wakeup */
458 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
459 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
460
461 defer = true;
462out:
463 spin_unlock(&mvm->d0i3_tx_lock);
464 return defer;
465}
466
303static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 467static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
304 struct ieee80211_tx_control *control, 468 struct ieee80211_tx_control *control,
305 struct sk_buff *skb) 469 struct sk_buff *skb)
306{ 470{
307 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 471 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
472 struct ieee80211_sta *sta = control->sta;
473 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
474 struct ieee80211_hdr *hdr = (void *)skb->data;
308 475
309 if (iwl_mvm_is_radio_killed(mvm)) { 476 if (iwl_mvm_is_radio_killed(mvm)) {
310 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); 477 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@@ -315,8 +482,18 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
315 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) 482 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
316 goto drop; 483 goto drop;
317 484
318 if (control->sta) { 485 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
319 if (iwl_mvm_tx_skb(mvm, skb, control->sta)) 486 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
487 ieee80211_is_mgmt(hdr->frame_control) &&
488 !ieee80211_is_deauth(hdr->frame_control) &&
489 !ieee80211_is_disassoc(hdr->frame_control) &&
490 !ieee80211_is_action(hdr->frame_control)))
491 sta = NULL;
492
493 if (sta) {
494 if (iwl_mvm_defer_tx(mvm, sta, skb))
495 return;
496 if (iwl_mvm_tx_skb(mvm, skb, sta))
320 goto drop; 497 goto drop;
321 return; 498 return;
322 } 499 }
@@ -354,6 +531,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
354{ 531{
355 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 532 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
356 int ret; 533 int ret;
534 bool tx_agg_ref = false;
357 535
358 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 536 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
359 sta->addr, tid, action); 537 sta->addr, tid, action);
@@ -361,6 +539,23 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
361 if (!(mvm->nvm_data->sku_cap_11n_enable)) 539 if (!(mvm->nvm_data->sku_cap_11n_enable))
362 return -EACCES; 540 return -EACCES;
363 541
542 /* return from D0i3 before starting a new Tx aggregation */
543 if (action == IEEE80211_AMPDU_TX_START) {
544 iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
545 tx_agg_ref = true;
546
547 /*
548 * wait synchronously until D0i3 exit to get the correct
549 * sequence number for the tid
550 */
551 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
552 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
553 WARN_ON_ONCE(1);
554 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
555 return -EIO;
556 }
557 }
558
364 mutex_lock(&mvm->mutex); 559 mutex_lock(&mvm->mutex);
365 560
366 switch (action) { 561 switch (action) {
@@ -398,6 +593,13 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
398 } 593 }
399 mutex_unlock(&mvm->mutex); 594 mutex_unlock(&mvm->mutex);
400 595
596 /*
597 * If the tid is marked as started, we won't use it for offloaded
598 * traffic on the next D0i3 entry. It's safe to unref.
599 */
600 if (tx_agg_ref)
601 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
602
401 return ret; 603 return ret;
402} 604}
403 605
@@ -422,6 +624,15 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
422 624
423static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 625static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
424{ 626{
627#ifdef CONFIG_IWLWIFI_DEBUGFS
628 static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
629
630 iwl_mvm_fw_error_dump(mvm);
631
632 /* notify the userspace about the error we had */
633 kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
634#endif
635
425 iwl_trans_stop_device(mvm->trans); 636 iwl_trans_stop_device(mvm->trans);
426 637
427 mvm->scan_status = IWL_MVM_SCAN_NONE; 638 mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -434,6 +645,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
434 iwl_mvm_cleanup_iterator, mvm); 645 iwl_mvm_cleanup_iterator, mvm);
435 646
436 mvm->p2p_device_vif = NULL; 647 mvm->p2p_device_vif = NULL;
648 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
437 649
438 iwl_mvm_reset_phy_ctxts(mvm); 650 iwl_mvm_reset_phy_ctxts(mvm);
439 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 651 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -441,6 +653,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
441 653
442 ieee80211_wake_queues(mvm->hw); 654 ieee80211_wake_queues(mvm->hw);
443 655
656 /* cleanup all stale references (scan, roc), but keep the
657 * ucode_down ref until reconfig is complete */
658 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
659
444 mvm->vif_count = 0; 660 mvm->vif_count = 0;
445 mvm->rx_ba_sessions = 0; 661 mvm->rx_ba_sessions = 0;
446} 662}
@@ -470,11 +686,15 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
470 mutex_lock(&mvm->mutex); 686 mutex_lock(&mvm->mutex);
471 687
472 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 688 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
689 iwl_mvm_d0i3_enable_tx(mvm, NULL);
473 ret = iwl_mvm_update_quotas(mvm, NULL); 690 ret = iwl_mvm_update_quotas(mvm, NULL);
474 if (ret) 691 if (ret)
475 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 692 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
476 ret); 693 ret);
477 694
695 /* allow transport/FW low power modes */
696 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
697
478 mutex_unlock(&mvm->mutex); 698 mutex_unlock(&mvm->mutex);
479} 699}
480 700
@@ -482,9 +702,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
482{ 702{
483 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 703 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
484 704
705 flush_work(&mvm->d0i3_exit_work);
485 flush_work(&mvm->async_handlers_wk); 706 flush_work(&mvm->async_handlers_wk);
486 707
487 mutex_lock(&mvm->mutex); 708 mutex_lock(&mvm->mutex);
709
710 /* disallow low power states when the FW is down */
711 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
712
488 /* async_handlers_wk is now blocked */ 713 /* async_handlers_wk is now blocked */
489 714
490 /* 715 /*
@@ -510,14 +735,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
510 cancel_work_sync(&mvm->async_handlers_wk); 735 cancel_work_sync(&mvm->async_handlers_wk);
511} 736}
512 737
513static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
514 struct ieee80211_vif *vif)
515{
516 struct iwl_mvm *mvm = data;
517
518 iwl_mvm_power_update_mode(mvm, vif);
519}
520
521static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) 738static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
522{ 739{
523 u16 i; 740 u16 i;
@@ -585,7 +802,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
585 vif->type == NL80211_IFTYPE_ADHOC) { 802 vif->type == NL80211_IFTYPE_ADHOC) {
586 u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); 803 u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
587 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 804 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
588 qmask); 805 qmask,
806 ieee80211_vif_type_p2p(vif));
589 if (ret) { 807 if (ret) {
590 IWL_ERR(mvm, "Failed to allocate bcast sta\n"); 808 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
591 goto out_release; 809 goto out_release;
@@ -599,10 +817,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
599 if (ret) 817 if (ret)
600 goto out_release; 818 goto out_release;
601 819
602 iwl_mvm_power_disable(mvm, vif); 820 ret = iwl_mvm_power_update_mac(mvm, vif);
821 if (ret)
822 goto out_release;
603 823
604 /* beacon filtering */ 824 /* beacon filtering */
605 ret = iwl_mvm_disable_beacon_filter(mvm, vif); 825 ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
606 if (ret) 826 if (ret)
607 goto out_remove_mac; 827 goto out_remove_mac;
608 828
@@ -661,11 +881,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
661 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 881 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
662 mvm->vif_count--; 882 mvm->vif_count--;
663 883
664 /* TODO: remove this when legacy PM will be discarded */
665 ieee80211_iterate_active_interfaces(
666 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
667 iwl_mvm_power_update_iterator, mvm);
668
669 iwl_mvm_mac_ctxt_release(mvm, vif); 884 iwl_mvm_mac_ctxt_release(mvm, vif);
670 out_unlock: 885 out_unlock:
671 mutex_unlock(&mvm->mutex); 886 mutex_unlock(&mvm->mutex);
@@ -754,11 +969,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
754 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) 969 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
755 mvm->vif_count--; 970 mvm->vif_count--;
756 971
757 /* TODO: remove this when legacy PM will be discarded */ 972 iwl_mvm_power_update_mac(mvm, vif);
758 ieee80211_iterate_active_interfaces(
759 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
760 iwl_mvm_power_update_iterator, mvm);
761
762 iwl_mvm_mac_ctxt_remove(mvm, vif); 973 iwl_mvm_mac_ctxt_remove(mvm, vif);
763 974
764out_release: 975out_release:
@@ -876,6 +1087,156 @@ out:
876 *total_flags = 0; 1087 *total_flags = 0;
877} 1088}
878 1089
1090#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1091struct iwl_bcast_iter_data {
1092 struct iwl_mvm *mvm;
1093 struct iwl_bcast_filter_cmd *cmd;
1094 u8 current_filter;
1095};
1096
1097static void
1098iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1099 const struct iwl_fw_bcast_filter *in_filter,
1100 struct iwl_fw_bcast_filter *out_filter)
1101{
1102 struct iwl_fw_bcast_filter_attr *attr;
1103 int i;
1104
1105 memcpy(out_filter, in_filter, sizeof(*out_filter));
1106
1107 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1108 attr = &out_filter->attrs[i];
1109
1110 if (!attr->mask)
1111 break;
1112
1113 switch (attr->reserved1) {
1114 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1115 if (vif->bss_conf.arp_addr_cnt != 1) {
1116 attr->mask = 0;
1117 continue;
1118 }
1119
1120 attr->val = vif->bss_conf.arp_addr_list[0];
1121 break;
1122 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1123 attr->val = *(__be32 *)&vif->addr[2];
1124 break;
1125 default:
1126 break;
1127 }
1128 attr->reserved1 = 0;
1129 out_filter->num_attrs++;
1130 }
1131}
1132
1133static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1134 struct ieee80211_vif *vif)
1135{
1136 struct iwl_bcast_iter_data *data = _data;
1137 struct iwl_mvm *mvm = data->mvm;
1138 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1139 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1140 struct iwl_fw_bcast_mac *bcast_mac;
1141 int i;
1142
1143 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1144 return;
1145
1146 bcast_mac = &cmd->macs[mvmvif->id];
1147
1148 /* enable filtering only for associated stations */
1149 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
1150 return;
1151
1152 bcast_mac->default_discard = 1;
1153
1154 /* copy all configured filters */
1155 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1156 /*
1157 * Make sure we don't exceed our filters limit.
1158 * if there is still a valid filter to be configured,
1159 * be on the safe side and just allow bcast for this mac.
1160 */
1161 if (WARN_ON_ONCE(data->current_filter >=
1162 ARRAY_SIZE(cmd->filters))) {
1163 bcast_mac->default_discard = 0;
1164 bcast_mac->attached_filters = 0;
1165 break;
1166 }
1167
1168 iwl_mvm_set_bcast_filter(vif,
1169 &mvm->bcast_filters[i],
1170 &cmd->filters[data->current_filter]);
1171
1172 /* skip current filter if it contains no attributes */
1173 if (!cmd->filters[data->current_filter].num_attrs)
1174 continue;
1175
1176 /* attach the filter to current mac */
1177 bcast_mac->attached_filters |=
1178 cpu_to_le16(BIT(data->current_filter));
1179
1180 data->current_filter++;
1181 }
1182}
1183
1184bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1185 struct iwl_bcast_filter_cmd *cmd)
1186{
1187 struct iwl_bcast_iter_data iter_data = {
1188 .mvm = mvm,
1189 .cmd = cmd,
1190 };
1191
1192 memset(cmd, 0, sizeof(*cmd));
1193 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1194 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1195
1196#ifdef CONFIG_IWLWIFI_DEBUGFS
1197 /* use debugfs filters/macs if override is configured */
1198 if (mvm->dbgfs_bcast_filtering.override) {
1199 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1200 sizeof(cmd->filters));
1201 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1202 sizeof(cmd->macs));
1203 return true;
1204 }
1205#endif
1206
1207 /* if no filters are configured, do nothing */
1208 if (!mvm->bcast_filters)
1209 return false;
1210
1211 /* configure and attach these filters for each associated sta vif */
1212 ieee80211_iterate_active_interfaces(
1213 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1214 iwl_mvm_bcast_filter_iterator, &iter_data);
1215
1216 return true;
1217}
1218static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1219 struct ieee80211_vif *vif)
1220{
1221 struct iwl_bcast_filter_cmd cmd;
1222
1223 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1224 return 0;
1225
1226 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1227 return 0;
1228
1229 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
1230 sizeof(cmd), &cmd);
1231}
1232#else
1233static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1234 struct ieee80211_vif *vif)
1235{
1236 return 0;
1237}
1238#endif
1239
879static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 1240static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
880 struct ieee80211_vif *vif, 1241 struct ieee80211_vif *vif,
881 struct ieee80211_bss_conf *bss_conf, 1242 struct ieee80211_bss_conf *bss_conf,
@@ -928,6 +1289,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
928 1289
929 iwl_mvm_sf_update(mvm, vif, false); 1290 iwl_mvm_sf_update(mvm, vif, false);
930 iwl_mvm_power_vif_assoc(mvm, vif); 1291 iwl_mvm_power_vif_assoc(mvm, vif);
1292 if (vif->p2p)
1293 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
931 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 1294 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
932 /* 1295 /*
933 * If update fails - SF might be running in associated 1296 * If update fails - SF might be running in associated
@@ -940,27 +1303,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
940 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); 1303 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
941 if (ret) 1304 if (ret)
942 IWL_ERR(mvm, "failed to remove AP station\n"); 1305 IWL_ERR(mvm, "failed to remove AP station\n");
1306
1307 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1308 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
943 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 1309 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
944 /* remove quota for this interface */ 1310 /* remove quota for this interface */
945 ret = iwl_mvm_update_quotas(mvm, NULL); 1311 ret = iwl_mvm_update_quotas(mvm, NULL);
946 if (ret) 1312 if (ret)
947 IWL_ERR(mvm, "failed to update quotas\n"); 1313 IWL_ERR(mvm, "failed to update quotas\n");
1314
1315 if (vif->p2p)
1316 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
948 } 1317 }
949 1318
950 iwl_mvm_recalc_multicast(mvm); 1319 iwl_mvm_recalc_multicast(mvm);
1320 iwl_mvm_configure_bcast_filter(mvm, vif);
951 1321
952 /* reset rssi values */ 1322 /* reset rssi values */
953 mvmvif->bf_data.ave_beacon_signal = 0; 1323 mvmvif->bf_data.ave_beacon_signal = 0;
954 1324
955 if (!(mvm->fw->ucode_capa.flags &
956 IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
957 /* Workaround for FW bug, otherwise FW disables device
958 * power save upon disassociation
959 */
960 ret = iwl_mvm_power_update_mode(mvm, vif);
961 if (ret)
962 IWL_ERR(mvm, "failed to update power mode\n");
963 }
964 iwl_mvm_bt_coex_vif_change(mvm); 1325 iwl_mvm_bt_coex_vif_change(mvm);
965 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 1326 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
966 IEEE80211_SMPS_AUTOMATIC); 1327 IEEE80211_SMPS_AUTOMATIC);
@@ -971,9 +1332,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
971 */ 1332 */
972 iwl_mvm_remove_time_event(mvm, mvmvif, 1333 iwl_mvm_remove_time_event(mvm, mvmvif,
973 &mvmvif->time_event_data); 1334 &mvmvif->time_event_data);
1335 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
974 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | 1336 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
975 BSS_CHANGED_QOS)) { 1337 BSS_CHANGED_QOS)) {
976 ret = iwl_mvm_power_update_mode(mvm, vif); 1338 ret = iwl_mvm_power_update_mac(mvm, vif);
977 if (ret) 1339 if (ret)
978 IWL_ERR(mvm, "failed to update power mode\n"); 1340 IWL_ERR(mvm, "failed to update power mode\n");
979 } 1341 }
@@ -987,10 +1349,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
987 IWL_DEBUG_MAC80211(mvm, "cqm info_changed"); 1349 IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
988 /* reset cqm events tracking */ 1350 /* reset cqm events tracking */
989 mvmvif->bf_data.last_cqm_event = 0; 1351 mvmvif->bf_data.last_cqm_event = 0;
990 ret = iwl_mvm_update_beacon_filter(mvm, vif); 1352 ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
991 if (ret) 1353 if (ret)
992 IWL_ERR(mvm, "failed to update CQM thresholds\n"); 1354 IWL_ERR(mvm, "failed to update CQM thresholds\n");
993 } 1355 }
1356
1357 if (changes & BSS_CHANGED_ARP_FILTER) {
1358 IWL_DEBUG_MAC80211(mvm, "arp filter changed");
1359 iwl_mvm_configure_bcast_filter(mvm, vif);
1360 }
994} 1361}
995 1362
996static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 1363static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@@ -1024,8 +1391,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1024 if (ret) 1391 if (ret)
1025 goto out_remove; 1392 goto out_remove;
1026 1393
1027 mvmvif->ap_ibss_active = true;
1028
1029 /* Send the bcast station. At this stage the TBTT and DTIM time events 1394 /* Send the bcast station. At this stage the TBTT and DTIM time events
1030 * are added and applied to the scheduler */ 1395 * are added and applied to the scheduler */
1031 ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta); 1396 ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@@ -1036,8 +1401,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1036 mvmvif->ap_ibss_active = true; 1401 mvmvif->ap_ibss_active = true;
1037 1402
1038 /* power updated needs to be done before quotas */ 1403 /* power updated needs to be done before quotas */
1039 mvm->bound_vif_cnt++; 1404 iwl_mvm_power_update_mac(mvm, vif);
1040 iwl_mvm_power_update_binding(mvm, vif, true);
1041 1405
1042 ret = iwl_mvm_update_quotas(mvm, vif); 1406 ret = iwl_mvm_update_quotas(mvm, vif);
1043 if (ret) 1407 if (ret)
@@ -1047,14 +1411,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1047 if (vif->p2p && mvm->p2p_device_vif) 1411 if (vif->p2p && mvm->p2p_device_vif)
1048 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 1412 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
1049 1413
1414 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
1415
1050 iwl_mvm_bt_coex_vif_change(mvm); 1416 iwl_mvm_bt_coex_vif_change(mvm);
1051 1417
1052 mutex_unlock(&mvm->mutex); 1418 mutex_unlock(&mvm->mutex);
1053 return 0; 1419 return 0;
1054 1420
1055out_quota_failed: 1421out_quota_failed:
1056 mvm->bound_vif_cnt--; 1422 iwl_mvm_power_update_mac(mvm, vif);
1057 iwl_mvm_power_update_binding(mvm, vif, false);
1058 mvmvif->ap_ibss_active = false; 1423 mvmvif->ap_ibss_active = false;
1059 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 1424 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
1060out_unbind: 1425out_unbind:
@@ -1080,6 +1445,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
1080 1445
1081 iwl_mvm_bt_coex_vif_change(mvm); 1446 iwl_mvm_bt_coex_vif_change(mvm);
1082 1447
1448 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
1449
1083 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 1450 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
1084 if (vif->p2p && mvm->p2p_device_vif) 1451 if (vif->p2p && mvm->p2p_device_vif)
1085 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 1452 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -1088,8 +1455,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
1088 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 1455 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
1089 iwl_mvm_binding_remove_vif(mvm, vif); 1456 iwl_mvm_binding_remove_vif(mvm, vif);
1090 1457
1091 mvm->bound_vif_cnt--; 1458 iwl_mvm_power_update_mac(mvm, vif);
1092 iwl_mvm_power_update_binding(mvm, vif, false);
1093 1459
1094 iwl_mvm_mac_ctxt_remove(mvm, vif); 1460 iwl_mvm_mac_ctxt_remove(mvm, vif);
1095 1461
@@ -1103,26 +1469,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
1103 u32 changes) 1469 u32 changes)
1104{ 1470{
1105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1471 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1106 enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
1107 BSS_CHANGED_HT |
1108 BSS_CHANGED_BANDWIDTH;
1109 int ret;
1110 1472
1111 /* Changes will be applied when the AP/IBSS is started */ 1473 /* Changes will be applied when the AP/IBSS is started */
1112 if (!mvmvif->ap_ibss_active) 1474 if (!mvmvif->ap_ibss_active)
1113 return; 1475 return;
1114 1476
1115 if (changes & ht_change) { 1477 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
1116 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 1478 BSS_CHANGED_BANDWIDTH) &&
1117 if (ret) 1479 iwl_mvm_mac_ctxt_changed(mvm, vif))
1118 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1480 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1119 }
1120 1481
1121 /* Need to send a new beacon template to the FW */ 1482 /* Need to send a new beacon template to the FW */
1122 if (changes & BSS_CHANGED_BEACON) { 1483 if (changes & BSS_CHANGED_BEACON &&
1123 if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 1484 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
1124 IWL_WARN(mvm, "Failed updating beacon data\n"); 1485 IWL_WARN(mvm, "Failed updating beacon data\n");
1125 }
1126} 1486}
1127 1487
1128static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 1488static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -1162,13 +1522,30 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
1162 1522
1163 mutex_lock(&mvm->mutex); 1523 mutex_lock(&mvm->mutex);
1164 1524
1165 if (mvm->scan_status == IWL_MVM_SCAN_NONE) 1525 switch (mvm->scan_status) {
1166 ret = iwl_mvm_scan_request(mvm, vif, req); 1526 case IWL_MVM_SCAN_SCHED:
1167 else 1527 ret = iwl_mvm_sched_scan_stop(mvm);
1528 if (ret) {
1529 ret = -EBUSY;
1530 goto out;
1531 }
1532 break;
1533 case IWL_MVM_SCAN_NONE:
1534 break;
1535 default:
1168 ret = -EBUSY; 1536 ret = -EBUSY;
1537 goto out;
1538 }
1169 1539
1170 mutex_unlock(&mvm->mutex); 1540 iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
1171 1541
1542 ret = iwl_mvm_scan_request(mvm, vif, req);
1543 if (ret)
1544 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1545out:
1546 mutex_unlock(&mvm->mutex);
1547 /* make sure to flush the Rx handler before the next scan arrives */
1548 iwl_mvm_wait_for_async_handlers(mvm);
1172 return ret; 1549 return ret;
1173} 1550}
1174 1551
@@ -1186,20 +1563,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
1186 1563
1187static void 1564static void
1188iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 1565iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
1189 struct ieee80211_sta *sta, u16 tid, 1566 struct ieee80211_sta *sta, u16 tids,
1190 int num_frames, 1567 int num_frames,
1191 enum ieee80211_frame_release_type reason, 1568 enum ieee80211_frame_release_type reason,
1192 bool more_data) 1569 bool more_data)
1193{ 1570{
1194 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1571 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1195 1572
1196 /* TODO: how do we tell the fw to send frames for a specific TID */ 1573 /* Called when we need to transmit (a) frame(s) from mac80211 */
1197 1574
1198 /* 1575 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
1199 * The fw will send EOSP notification when the last frame will be 1576 tids, more_data, false);
1200 * transmitted. 1577}
1201 */ 1578
1202 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames); 1579static void
1580iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
1581 struct ieee80211_sta *sta, u16 tids,
1582 int num_frames,
1583 enum ieee80211_frame_release_type reason,
1584 bool more_data)
1585{
1586 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1587
1588 /* Called when we need to transmit (a) frame(s) from agg queue */
1589
1590 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
1591 tids, more_data, true);
1203} 1592}
1204 1593
1205static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 1594static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1209,11 +1598,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
1209{ 1598{
1210 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1599 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1211 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1600 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1601 int tid;
1212 1602
1213 switch (cmd) { 1603 switch (cmd) {
1214 case STA_NOTIFY_SLEEP: 1604 case STA_NOTIFY_SLEEP:
1215 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) 1605 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
1216 ieee80211_sta_block_awake(hw, sta, true); 1606 ieee80211_sta_block_awake(hw, sta, true);
1607 spin_lock_bh(&mvmsta->lock);
1608 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1609 struct iwl_mvm_tid_data *tid_data;
1610
1611 tid_data = &mvmsta->tid_data[tid];
1612 if (tid_data->state != IWL_AGG_ON &&
1613 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
1614 continue;
1615 if (iwl_mvm_tid_queued(tid_data) == 0)
1616 continue;
1617 ieee80211_sta_set_buffered(sta, tid, true);
1618 }
1619 spin_unlock_bh(&mvmsta->lock);
1217 /* 1620 /*
1218 * The fw updates the STA to be asleep. Tx packets on the Tx 1621 * The fw updates the STA to be asleep. Tx packets on the Tx
1219 * queues to this station will not be transmitted. The fw will 1622 * queues to this station will not be transmitted. The fw will
@@ -1304,12 +1707,14 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1304 } else if (old_state == IEEE80211_STA_ASSOC && 1707 } else if (old_state == IEEE80211_STA_ASSOC &&
1305 new_state == IEEE80211_STA_AUTHORIZED) { 1708 new_state == IEEE80211_STA_AUTHORIZED) {
1306 /* enable beacon filtering */ 1709 /* enable beacon filtering */
1307 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif)); 1710 if (vif->bss_conf.dtim_period)
1711 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
1712 CMD_SYNC));
1308 ret = 0; 1713 ret = 0;
1309 } else if (old_state == IEEE80211_STA_AUTHORIZED && 1714 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
1310 new_state == IEEE80211_STA_ASSOC) { 1715 new_state == IEEE80211_STA_ASSOC) {
1311 /* disable beacon filtering */ 1716 /* disable beacon filtering */
1312 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif)); 1717 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
1313 ret = 0; 1718 ret = 0;
1314 } else if (old_state == IEEE80211_STA_ASSOC && 1719 } else if (old_state == IEEE80211_STA_ASSOC &&
1315 new_state == IEEE80211_STA_AUTH) { 1720 new_state == IEEE80211_STA_AUTH) {
@@ -1401,9 +1806,26 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
1401 1806
1402 mutex_lock(&mvm->mutex); 1807 mutex_lock(&mvm->mutex);
1403 1808
1404 if (mvm->scan_status != IWL_MVM_SCAN_NONE) { 1809 switch (mvm->scan_status) {
1405 IWL_DEBUG_SCAN(mvm, 1810 case IWL_MVM_SCAN_OS:
1406 "SCHED SCAN request during internal scan - abort\n"); 1811 IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
1812 ret = iwl_mvm_cancel_scan(mvm);
1813 if (ret) {
1814 ret = -EBUSY;
1815 goto out;
1816 }
1817
1818 /*
1819 * iwl_mvm_rx_scan_complete() will be called soon but will
1820 * not reset the scan status as it won't be IWL_MVM_SCAN_OS
1821 * any more since we queue the next scan immediately (below).
1822 * We make sure it is called before the next scan starts by
1823 * flushing the async-handlers work.
1824 */
1825 break;
1826 case IWL_MVM_SCAN_NONE:
1827 break;
1828 default:
1407 ret = -EBUSY; 1829 ret = -EBUSY;
1408 goto out; 1830 goto out;
1409 } 1831 }
@@ -1425,17 +1847,23 @@ err:
1425 mvm->scan_status = IWL_MVM_SCAN_NONE; 1847 mvm->scan_status = IWL_MVM_SCAN_NONE;
1426out: 1848out:
1427 mutex_unlock(&mvm->mutex); 1849 mutex_unlock(&mvm->mutex);
1850 /* make sure to flush the Rx handler before the next scan arrives */
1851 iwl_mvm_wait_for_async_handlers(mvm);
1428 return ret; 1852 return ret;
1429} 1853}
1430 1854
1431static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 1855static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
1432 struct ieee80211_vif *vif) 1856 struct ieee80211_vif *vif)
1433{ 1857{
1434 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1858 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1859 int ret;
1435 1860
1436 mutex_lock(&mvm->mutex); 1861 mutex_lock(&mvm->mutex);
1437 iwl_mvm_sched_scan_stop(mvm); 1862 ret = iwl_mvm_sched_scan_stop(mvm);
1438 mutex_unlock(&mvm->mutex); 1863 mutex_unlock(&mvm->mutex);
1864 iwl_mvm_wait_for_async_handlers(mvm);
1865
1866 return ret;
1439} 1867}
1440 1868
1441static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 1869static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -1773,8 +2201,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
1773 * Power state must be updated before quotas, 2201 * Power state must be updated before quotas,
1774 * otherwise fw will complain. 2202 * otherwise fw will complain.
1775 */ 2203 */
1776 mvm->bound_vif_cnt++; 2204 iwl_mvm_power_update_mac(mvm, vif);
1777 iwl_mvm_power_update_binding(mvm, vif, true);
1778 2205
1779 /* Setting the quota at this stage is only required for monitor 2206 /* Setting the quota at this stage is only required for monitor
1780 * interfaces. For the other types, the bss_info changed flow 2207 * interfaces. For the other types, the bss_info changed flow
@@ -1791,8 +2218,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
1791 2218
1792 out_remove_binding: 2219 out_remove_binding:
1793 iwl_mvm_binding_remove_vif(mvm, vif); 2220 iwl_mvm_binding_remove_vif(mvm, vif);
1794 mvm->bound_vif_cnt--; 2221 iwl_mvm_power_update_mac(mvm, vif);
1795 iwl_mvm_power_update_binding(mvm, vif, false);
1796 out_unlock: 2222 out_unlock:
1797 mutex_unlock(&mvm->mutex); 2223 mutex_unlock(&mvm->mutex);
1798 if (ret) 2224 if (ret)
@@ -1824,8 +2250,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
1824 } 2250 }
1825 2251
1826 iwl_mvm_binding_remove_vif(mvm, vif); 2252 iwl_mvm_binding_remove_vif(mvm, vif);
1827 mvm->bound_vif_cnt--; 2253 iwl_mvm_power_update_mac(mvm, vif);
1828 iwl_mvm_power_update_binding(mvm, vif, false);
1829 2254
1830out_unlock: 2255out_unlock:
1831 mvmvif->phy_ctxt = NULL; 2256 mvmvif->phy_ctxt = NULL;
@@ -1892,8 +2317,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
1892 return -EINVAL; 2317 return -EINVAL;
1893 2318
1894 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 2319 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
1895 return iwl_mvm_enable_beacon_filter(mvm, vif); 2320 return iwl_mvm_enable_beacon_filter(mvm, vif,
1896 return iwl_mvm_disable_beacon_filter(mvm, vif); 2321 CMD_SYNC);
2322 return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
1897 } 2323 }
1898 2324
1899 return -EOPNOTSUPP; 2325 return -EOPNOTSUPP;
@@ -1914,7 +2340,7 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
1914} 2340}
1915#endif 2341#endif
1916 2342
1917struct ieee80211_ops iwl_mvm_hw_ops = { 2343const struct ieee80211_ops iwl_mvm_hw_ops = {
1918 .tx = iwl_mvm_mac_tx, 2344 .tx = iwl_mvm_mac_tx,
1919 .ampdu_action = iwl_mvm_mac_ampdu_action, 2345 .ampdu_action = iwl_mvm_mac_ampdu_action,
1920 .start = iwl_mvm_mac_start, 2346 .start = iwl_mvm_mac_start,
@@ -1932,6 +2358,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
1932 .sta_state = iwl_mvm_mac_sta_state, 2358 .sta_state = iwl_mvm_mac_sta_state,
1933 .sta_notify = iwl_mvm_mac_sta_notify, 2359 .sta_notify = iwl_mvm_mac_sta_notify,
1934 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, 2360 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
2361 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
1935 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 2362 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
1936 .sta_rc_update = iwl_mvm_sta_rc_update, 2363 .sta_rc_update = iwl_mvm_sta_rc_update,
1937 .conf_tx = iwl_mvm_mac_conf_tx, 2364 .conf_tx = iwl_mvm_mac_conf_tx,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2b0ba1fc3c82..d564233a65da 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -91,9 +91,7 @@ enum iwl_mvm_tx_fifo {
91 IWL_MVM_TX_FIFO_MCAST = 5, 91 IWL_MVM_TX_FIFO_MCAST = 5,
92}; 92};
93 93
94extern struct ieee80211_ops iwl_mvm_hw_ops; 94extern const struct ieee80211_ops iwl_mvm_hw_ops;
95extern const struct iwl_mvm_power_ops pm_legacy_ops;
96extern const struct iwl_mvm_power_ops pm_mac_ops;
97 95
98/** 96/**
99 * struct iwl_mvm_mod_params - module parameters for iwlmvm 97 * struct iwl_mvm_mod_params - module parameters for iwlmvm
@@ -159,20 +157,6 @@ enum iwl_power_scheme {
159 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 157 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
160#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 158#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
161 159
162struct iwl_mvm_power_ops {
163 int (*power_update_mode)(struct iwl_mvm *mvm,
164 struct ieee80211_vif *vif);
165 int (*power_update_device_mode)(struct iwl_mvm *mvm);
166 int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
167 void (*power_update_binding)(struct iwl_mvm *mvm,
168 struct ieee80211_vif *vif, bool assign);
169#ifdef CONFIG_IWLWIFI_DEBUGFS
170 int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
171 char *buf, int bufsz);
172#endif
173};
174
175
176#ifdef CONFIG_IWLWIFI_DEBUGFS 160#ifdef CONFIG_IWLWIFI_DEBUGFS
177enum iwl_dbgfs_pm_mask { 161enum iwl_dbgfs_pm_mask {
178 MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), 162 MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@@ -239,6 +223,19 @@ enum iwl_mvm_smps_type_request {
239 NUM_IWL_MVM_SMPS_REQ, 223 NUM_IWL_MVM_SMPS_REQ,
240}; 224};
241 225
226enum iwl_mvm_ref_type {
227 IWL_MVM_REF_UCODE_DOWN,
228 IWL_MVM_REF_SCAN,
229 IWL_MVM_REF_ROC,
230 IWL_MVM_REF_P2P_CLIENT,
231 IWL_MVM_REF_AP_IBSS,
232 IWL_MVM_REF_USER,
233 IWL_MVM_REF_TX,
234 IWL_MVM_REF_TX_AGG,
235
236 IWL_MVM_REF_COUNT,
237};
238
242/** 239/**
243* struct iwl_mvm_vif_bf_data - beacon filtering related data 240* struct iwl_mvm_vif_bf_data - beacon filtering related data
244* @bf_enabled: indicates if beacon filtering is enabled 241* @bf_enabled: indicates if beacon filtering is enabled
@@ -269,7 +266,9 @@ struct iwl_mvm_vif_bf_data {
269 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface 266 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
270 * should get quota etc. 267 * should get quota etc.
271 * @monitor_active: indicates that monitor context is configured, and that the 268 * @monitor_active: indicates that monitor context is configured, and that the
272 * interface should get quota etc. 269 * interface should get quota etc.
270 * @low_latency: indicates that this interface is in low-latency mode
271 * (VMACLowLatencyMode)
273 * @queue_params: QoS params for this MAC 272 * @queue_params: QoS params for this MAC
274 * @bcast_sta: station used for broadcast packets. Used by the following 273 * @bcast_sta: station used for broadcast packets. Used by the following
275 * vifs: P2P_DEVICE, GO and AP. 274 * vifs: P2P_DEVICE, GO and AP.
@@ -285,6 +284,7 @@ struct iwl_mvm_vif {
285 bool uploaded; 284 bool uploaded;
286 bool ap_ibss_active; 285 bool ap_ibss_active;
287 bool monitor_active; 286 bool monitor_active;
287 bool low_latency;
288 struct iwl_mvm_vif_bf_data bf_data; 288 struct iwl_mvm_vif_bf_data bf_data;
289 289
290 u32 ap_beacon_time; 290 u32 ap_beacon_time;
@@ -319,13 +319,13 @@ struct iwl_mvm_vif {
319 319
320 bool seqno_valid; 320 bool seqno_valid;
321 u16 seqno; 321 u16 seqno;
322#endif
322 323
323#if IS_ENABLED(CONFIG_IPV6) 324#if IS_ENABLED(CONFIG_IPV6)
324 /* IPv6 addresses for WoWLAN */ 325 /* IPv6 addresses for WoWLAN */
325 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; 326 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
326 int num_target_ipv6_addrs; 327 int num_target_ipv6_addrs;
327#endif 328#endif
328#endif
329 329
330#ifdef CONFIG_IWLWIFI_DEBUGFS 330#ifdef CONFIG_IWLWIFI_DEBUGFS
331 struct iwl_mvm *mvm; 331 struct iwl_mvm *mvm;
@@ -333,14 +333,13 @@ struct iwl_mvm_vif {
333 struct dentry *dbgfs_slink; 333 struct dentry *dbgfs_slink;
334 struct iwl_dbgfs_pm dbgfs_pm; 334 struct iwl_dbgfs_pm dbgfs_pm;
335 struct iwl_dbgfs_bf dbgfs_bf; 335 struct iwl_dbgfs_bf dbgfs_bf;
336 struct iwl_mac_power_cmd mac_pwr_cmd;
336#endif 337#endif
337 338
338 enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; 339 enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
339 340
340 /* FW identified misbehaving AP */ 341 /* FW identified misbehaving AP */
341 u8 uapsd_misbehaving_bssid[ETH_ALEN]; 342 u8 uapsd_misbehaving_bssid[ETH_ALEN];
342
343 bool pm_prevented;
344}; 343};
345 344
346static inline struct iwl_mvm_vif * 345static inline struct iwl_mvm_vif *
@@ -349,6 +348,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
349 return (void *)vif->drv_priv; 348 return (void *)vif->drv_priv;
350} 349}
351 350
351extern const u8 tid_to_mac80211_ac[];
352
352enum iwl_scan_status { 353enum iwl_scan_status {
353 IWL_MVM_SCAN_NONE, 354 IWL_MVM_SCAN_NONE,
354 IWL_MVM_SCAN_OS, 355 IWL_MVM_SCAN_OS,
@@ -415,6 +416,7 @@ struct iwl_tt_params {
415 * @ct_kill_exit: worker to exit thermal kill 416 * @ct_kill_exit: worker to exit thermal kill
416 * @dynamic_smps: Is thermal throttling enabled dynamic_smps? 417 * @dynamic_smps: Is thermal throttling enabled dynamic_smps?
417 * @tx_backoff: The current thremal throttling tx backoff in uSec. 418 * @tx_backoff: The current thremal throttling tx backoff in uSec.
419 * @min_backoff: The minimal tx backoff due to power restrictions
418 * @params: Parameters to configure the thermal throttling algorithm. 420 * @params: Parameters to configure the thermal throttling algorithm.
419 * @throttle: Is thermal throttling is active? 421 * @throttle: Is thermal throttling is active?
420 */ 422 */
@@ -422,10 +424,33 @@ struct iwl_mvm_tt_mgmt {
422 struct delayed_work ct_kill_exit; 424 struct delayed_work ct_kill_exit;
423 bool dynamic_smps; 425 bool dynamic_smps;
424 u32 tx_backoff; 426 u32 tx_backoff;
427 u32 min_backoff;
425 const struct iwl_tt_params *params; 428 const struct iwl_tt_params *params;
426 bool throttle; 429 bool throttle;
427}; 430};
428 431
432#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
433
434struct iwl_mvm_frame_stats {
435 u32 legacy_frames;
436 u32 ht_frames;
437 u32 vht_frames;
438 u32 bw_20_frames;
439 u32 bw_40_frames;
440 u32 bw_80_frames;
441 u32 bw_160_frames;
442 u32 sgi_frames;
443 u32 ngi_frames;
444 u32 siso_frames;
445 u32 mimo2_frames;
446 u32 agg_frames;
447 u32 ampdu_count;
448 u32 success_frames;
449 u32 fail_frames;
450 u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
451 int last_frame_idx;
452};
453
429struct iwl_mvm { 454struct iwl_mvm {
430 /* for logger access */ 455 /* for logger access */
431 struct device *dev; 456 struct device *dev;
@@ -457,6 +482,8 @@ struct iwl_mvm {
457 bool init_ucode_complete; 482 bool init_ucode_complete;
458 u32 error_event_table; 483 u32 error_event_table;
459 u32 log_event_table; 484 u32 log_event_table;
485 u32 umac_error_event_table;
486 bool support_umac_log;
460 487
461 u32 ampdu_ref; 488 u32 ampdu_ref;
462 489
@@ -470,7 +497,7 @@ struct iwl_mvm {
470 497
471 struct iwl_nvm_data *nvm_data; 498 struct iwl_nvm_data *nvm_data;
472 /* NVM sections */ 499 /* NVM sections */
473 struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS]; 500 struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
474 501
475 /* EEPROM MAC addresses */ 502 /* EEPROM MAC addresses */
476 struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; 503 struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -494,6 +521,17 @@ struct iwl_mvm {
494 /* rx chain antennas set through debugfs for the scan command */ 521 /* rx chain antennas set through debugfs for the scan command */
495 u8 scan_rx_ant; 522 u8 scan_rx_ant;
496 523
524#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
525 /* broadcast filters to configure for each associated station */
526 const struct iwl_fw_bcast_filter *bcast_filters;
527#ifdef CONFIG_IWLWIFI_DEBUGFS
528 struct {
529 u32 override; /* u32 for debugfs_create_bool */
530 struct iwl_bcast_filter_cmd cmd;
531 } dbgfs_bcast_filtering;
532#endif
533#endif
534
497 /* Internal station */ 535 /* Internal station */
498 struct iwl_mvm_int_sta aux_sta; 536 struct iwl_mvm_int_sta aux_sta;
499 537
@@ -506,6 +544,7 @@ struct iwl_mvm {
506#ifdef CONFIG_IWLWIFI_DEBUGFS 544#ifdef CONFIG_IWLWIFI_DEBUGFS
507 struct dentry *debugfs_dir; 545 struct dentry *debugfs_dir;
508 u32 dbgfs_sram_offset, dbgfs_sram_len; 546 u32 dbgfs_sram_offset, dbgfs_sram_len;
547 u32 dbgfs_prph_reg_addr;
509 bool disable_power_off; 548 bool disable_power_off;
510 bool disable_power_off_d3; 549 bool disable_power_off_d3;
511 550
@@ -513,6 +552,9 @@ struct iwl_mvm {
513 struct debugfs_blob_wrapper nvm_sw_blob; 552 struct debugfs_blob_wrapper nvm_sw_blob;
514 struct debugfs_blob_wrapper nvm_calib_blob; 553 struct debugfs_blob_wrapper nvm_calib_blob;
515 struct debugfs_blob_wrapper nvm_prod_blob; 554 struct debugfs_blob_wrapper nvm_prod_blob;
555
556 struct iwl_mvm_frame_stats drv_rx_stats;
557 spinlock_t drv_stats_lock;
516#endif 558#endif
517 559
518 struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; 560 struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -526,10 +568,16 @@ struct iwl_mvm {
526 */ 568 */
527 unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; 569 unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
528 570
571 /* A bitmap of reference types taken by the driver. */
572 unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
573
529 u8 vif_count; 574 u8 vif_count;
530 575
531 /* -1 for always, 0 for never, >0 for that many times */ 576 /* -1 for always, 0 for never, >0 for that many times */
532 s8 restart_fw; 577 s8 restart_fw;
578 void *fw_error_dump;
579 void *fw_error_sram;
580 u32 fw_error_sram_len;
533 581
534 struct led_classdev led; 582 struct led_classdev led;
535 583
@@ -548,17 +596,27 @@ struct iwl_mvm {
548#endif 596#endif
549#endif 597#endif
550 598
599 /* d0i3 */
600 u8 d0i3_ap_sta_id;
601 bool d0i3_offloading;
602 struct work_struct d0i3_exit_work;
603 struct sk_buff_head d0i3_tx;
604 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
605 spinlock_t d0i3_tx_lock;
606 wait_queue_head_t d0i3_exit_waitq;
607
551 /* BT-Coex */ 608 /* BT-Coex */
552 u8 bt_kill_msk; 609 u8 bt_kill_msk;
553 struct iwl_bt_coex_profile_notif last_bt_notif; 610 struct iwl_bt_coex_profile_notif last_bt_notif;
554 struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; 611 struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
612 u32 last_ant_isol;
613 u8 last_corun_lut;
614 u8 bt_tx_prio;
555 615
556 /* Thermal Throttling and CTkill */ 616 /* Thermal Throttling and CTkill */
557 struct iwl_mvm_tt_mgmt thermal_throttle; 617 struct iwl_mvm_tt_mgmt thermal_throttle;
558 s32 temperature; /* Celsius */ 618 s32 temperature; /* Celsius */
559 619
560 const struct iwl_mvm_power_ops *pm_ops;
561
562#ifdef CONFIG_NL80211_TESTMODE 620#ifdef CONFIG_NL80211_TESTMODE
563 u32 noa_duration; 621 u32 noa_duration;
564 struct ieee80211_vif *noa_vif; 622 struct ieee80211_vif *noa_vif;
@@ -569,10 +627,10 @@ struct iwl_mvm {
569 u8 first_agg_queue; 627 u8 first_agg_queue;
570 u8 last_agg_queue; 628 u8 last_agg_queue;
571 629
572 u8 bound_vif_cnt;
573
574 /* Indicate if device power save is allowed */ 630 /* Indicate if device power save is allowed */
575 bool ps_prevented; 631 bool ps_disabled;
632 /* Indicate if device power management is allowed */
633 bool pm_disabled;
576}; 634};
577 635
578/* Extract MVM priv from op_mode and _hw */ 636/* Extract MVM priv from op_mode and _hw */
@@ -587,6 +645,7 @@ enum iwl_mvm_status {
587 IWL_MVM_STATUS_HW_CTKILL, 645 IWL_MVM_STATUS_HW_CTKILL,
588 IWL_MVM_STATUS_ROC_RUNNING, 646 IWL_MVM_STATUS_ROC_RUNNING,
589 IWL_MVM_STATUS_IN_HW_RESTART, 647 IWL_MVM_STATUS_IN_HW_RESTART,
648 IWL_MVM_STATUS_IN_D0I3,
590}; 649};
591 650
592static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) 651static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -595,6 +654,30 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
595 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); 654 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
596} 655}
597 656
657static inline struct iwl_mvm_sta *
658iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
659{
660 struct ieee80211_sta *sta;
661
662 if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
663 return NULL;
664
665 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
666 lockdep_is_held(&mvm->mutex));
667
668 /* This can happen if the station has been removed right now */
669 if (IS_ERR_OR_NULL(sta))
670 return NULL;
671
672 return iwl_mvm_sta_from_mac80211(sta);
673}
674
675static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
676{
677 return mvm->trans->cfg->d0i3 &&
678 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
679}
680
598extern const u8 iwl_mvm_ac_to_tx_fifo[]; 681extern const u8 iwl_mvm_ac_to_tx_fifo[];
599 682
600struct iwl_rate_info { 683struct iwl_rate_info {
@@ -619,7 +702,10 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
619 struct ieee80211_tx_rate *r); 702 struct ieee80211_tx_rate *r);
620u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); 703u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
621void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); 704void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
622void iwl_mvm_dump_sram(struct iwl_mvm *mvm); 705#ifdef CONFIG_IWLWIFI_DEBUGFS
706void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
707void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
708#endif
623u8 first_antenna(u8 mask); 709u8 first_antenna(u8 mask);
624u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); 710u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
625 711
@@ -645,6 +731,11 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
645int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync); 731int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
646void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); 732void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
647 733
734static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
735{
736 flush_work(&mvm->async_handlers_wk);
737}
738
648/* Statistics */ 739/* Statistics */
649int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm, 740int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
650 struct iwl_rx_cmd_buffer *rxb, 741 struct iwl_rx_cmd_buffer *rxb,
@@ -661,6 +752,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
661int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); 752int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
662 753
663int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); 754int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
755bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
756 struct iwl_bcast_filter_cmd *cmd);
664 757
665/* 758/*
666 * FW notifications / CMD responses handlers 759 * FW notifications / CMD responses handlers
@@ -676,6 +769,9 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
676 struct iwl_device_cmd *cmd); 769 struct iwl_device_cmd *cmd);
677int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 770int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
678 struct iwl_device_cmd *cmd); 771 struct iwl_device_cmd *cmd);
772int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
773 struct iwl_rx_cmd_buffer *rxb,
774 struct iwl_device_cmd *cmd);
679int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 775int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
680 struct iwl_device_cmd *cmd); 776 struct iwl_device_cmd *cmd);
681int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, 777int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
@@ -730,7 +826,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
730 struct iwl_device_cmd *cmd); 826 struct iwl_device_cmd *cmd);
731int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 827int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
732 struct iwl_device_cmd *cmd); 828 struct iwl_device_cmd *cmd);
733void iwl_mvm_cancel_scan(struct iwl_mvm *mvm); 829int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
734 830
735/* Scheduled scan */ 831/* Scheduled scan */
736int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, 832int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -744,7 +840,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
744 struct cfg80211_sched_scan_request *req); 840 struct cfg80211_sched_scan_request *req);
745int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, 841int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
746 struct cfg80211_sched_scan_request *req); 842 struct cfg80211_sched_scan_request *req);
747void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm); 843int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
748int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm, 844int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
749 struct iwl_rx_cmd_buffer *rxb, 845 struct iwl_rx_cmd_buffer *rxb,
750 struct iwl_device_cmd *cmd); 846 struct iwl_device_cmd *cmd);
@@ -772,49 +868,24 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
772 868
773/* rate scaling */ 869/* rate scaling */
774int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); 870int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
871void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
872 struct iwl_mvm_frame_stats *stats,
873 u32 rate, bool agg);
874int rs_pretty_print_rate(char *buf, const u32 rate);
775 875
776/* power managment */ 876/* power management */
777static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, 877int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
778 struct ieee80211_vif *vif)
779{
780 return mvm->pm_ops->power_update_mode(mvm, vif);
781}
782
783static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
784 struct ieee80211_vif *vif)
785{
786 return mvm->pm_ops->power_disable(mvm, vif);
787}
788
789static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
790{
791 if (mvm->pm_ops->power_update_device_mode)
792 return mvm->pm_ops->power_update_device_mode(mvm);
793 return 0;
794}
795 878
796static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm, 879int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
797 struct ieee80211_vif *vif, 880int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
798 bool assign) 881int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
799{ 882 char *buf, int bufsz);
800 if (mvm->pm_ops->power_update_binding)
801 mvm->pm_ops->power_update_binding(mvm, vif, assign);
802}
803 883
804void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 884void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
805int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, 885int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
806 struct iwl_rx_cmd_buffer *rxb, 886 struct iwl_rx_cmd_buffer *rxb,
807 struct iwl_device_cmd *cmd); 887 struct iwl_device_cmd *cmd);
808 888
809#ifdef CONFIG_IWLWIFI_DEBUGFS
810static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
811 struct ieee80211_vif *vif,
812 char *buf, int bufsz)
813{
814 return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
815}
816#endif
817
818int iwl_mvm_leds_init(struct iwl_mvm *mvm); 889int iwl_mvm_leds_init(struct iwl_mvm *mvm);
819void iwl_mvm_leds_exit(struct iwl_mvm *mvm); 890void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
820 891
@@ -840,6 +911,17 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
840{ 911{
841} 912}
842#endif 913#endif
914void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
915 struct iwl_wowlan_config_cmd_v2 *cmd);
916int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
917 struct ieee80211_vif *vif,
918 bool disable_offloading,
919 u32 cmd_flags);
920
921/* D0i3 */
922void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
923void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
924void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
843 925
844/* BT Coex */ 926/* BT Coex */
845int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm); 927int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -850,10 +932,13 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
850void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 932void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
851 enum ieee80211_rssi_event rssi_event); 933 enum ieee80211_rssi_event rssi_event);
852void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); 934void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
853u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm, 935u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
854 struct ieee80211_sta *sta); 936 struct ieee80211_sta *sta);
855bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, 937bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
856 struct ieee80211_sta *sta); 938 struct ieee80211_sta *sta);
939u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
940 struct ieee80211_tx_info *info, u8 ac);
941int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
857 942
858enum iwl_bt_kill_msk { 943enum iwl_bt_kill_msk {
859 BT_KILL_MSK_DEFAULT, 944 BT_KILL_MSK_DEFAULT,
@@ -875,25 +960,53 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
875 struct iwl_beacon_filter_cmd *cmd) 960 struct iwl_beacon_filter_cmd *cmd)
876{} 961{}
877#endif 962#endif
963int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
964 struct ieee80211_vif *vif,
965 bool enable, u32 flags);
878int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, 966int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
879 struct ieee80211_vif *vif); 967 struct ieee80211_vif *vif,
968 u32 flags);
880int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 969int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
881 struct ieee80211_vif *vif); 970 struct ieee80211_vif *vif,
882int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, 971 u32 flags);
883 struct iwl_beacon_filter_cmd *cmd);
884int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, 972int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
885 struct ieee80211_vif *vif, bool enable); 973 struct ieee80211_vif *vif, bool enable);
886int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm, 974int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
887 struct ieee80211_vif *vif); 975 struct ieee80211_vif *vif,
976 bool force,
977 u32 flags);
888 978
889/* SMPS */ 979/* SMPS */
890void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 980void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
891 enum iwl_mvm_smps_type_request req_type, 981 enum iwl_mvm_smps_type_request req_type,
892 enum ieee80211_smps_mode smps_request); 982 enum ieee80211_smps_mode smps_request);
893 983
984/* Low latency */
985int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
986 bool value);
987/* get SystemLowLatencyMode - only needed for beacon threshold? */
988bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
989/* get VMACLowLatencyMode */
990static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
991{
992 /*
993 * should this consider associated/active/... state?
994 *
995 * Normally low-latency should only be active on interfaces
996 * that are active, but at least with debugfs it can also be
997 * enabled on interfaces that aren't active. However, when
998 * interface aren't active then they aren't added into the
999 * binding, so this has no real impact. For now, just return
1000 * the current desired low-latency state.
1001 */
1002
1003 return mvmvif->low_latency;
1004}
1005
894/* Thermal management and CT-kill */ 1006/* Thermal management and CT-kill */
1007void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
895void iwl_mvm_tt_handler(struct iwl_mvm *mvm); 1008void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
896void iwl_mvm_tt_initialize(struct iwl_mvm *mvm); 1009void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
897void iwl_mvm_tt_exit(struct iwl_mvm *mvm); 1010void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
898void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); 1011void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
899 1012
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 35b71af78d02..cf2d09f53782 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -67,14 +67,6 @@
67#include "iwl-eeprom-read.h" 67#include "iwl-eeprom-read.h"
68#include "iwl-nvm-parse.h" 68#include "iwl-nvm-parse.h"
69 69
70/* list of NVM sections we are allowed/need to read */
71static const int nvm_to_read[] = {
72 NVM_SECTION_TYPE_HW,
73 NVM_SECTION_TYPE_SW,
74 NVM_SECTION_TYPE_CALIBRATION,
75 NVM_SECTION_TYPE_PRODUCTION,
76};
77
78/* Default NVM size to read */ 70/* Default NVM size to read */
79#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) 71#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
80#define IWL_MAX_NVM_SECTION_SIZE 7000 72#define IWL_MAX_NVM_SECTION_SIZE 7000
@@ -236,24 +228,39 @@ static struct iwl_nvm_data *
236iwl_parse_nvm_sections(struct iwl_mvm *mvm) 228iwl_parse_nvm_sections(struct iwl_mvm *mvm)
237{ 229{
238 struct iwl_nvm_section *sections = mvm->nvm_sections; 230 struct iwl_nvm_section *sections = mvm->nvm_sections;
239 const __le16 *hw, *sw, *calib; 231 const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
240 232
241 /* Checking for required sections */ 233 /* Checking for required sections */
242 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 234 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
243 !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) { 235 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
244 IWL_ERR(mvm, "Can't parse empty NVM sections\n"); 236 !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
245 return NULL; 237 IWL_ERR(mvm, "Can't parse empty NVM sections\n");
238 return NULL;
239 }
240 } else {
241 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
242 !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
243 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
244 IWL_ERR(mvm,
245 "Can't parse empty family 8000 NVM sections\n");
246 return NULL;
247 }
246 } 248 }
247 249
248 if (WARN_ON(!mvm->cfg)) 250 if (WARN_ON(!mvm->cfg))
249 return NULL; 251 return NULL;
250 252
251 hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data; 253 hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
252 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; 254 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
253 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; 255 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
256 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
257 mac_override =
258 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
259
254 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, 260 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
255 iwl_fw_valid_tx_ant(mvm->fw), 261 regulatory, mac_override,
256 iwl_fw_valid_rx_ant(mvm->fw)); 262 mvm->fw->valid_tx_ant,
263 mvm->fw->valid_rx_ant);
257} 264}
258 265
259#define MAX_NVM_FILE_LEN 16384 266#define MAX_NVM_FILE_LEN 16384
@@ -293,6 +300,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
293 300
294#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) 301#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
295#define NVM_WORD2_ID(x) (x >> 12) 302#define NVM_WORD2_ID(x) (x >> 12)
303#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
304#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
296 305
297 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); 306 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
298 307
@@ -343,8 +352,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
343 break; 352 break;
344 } 353 }
345 354
346 section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); 355 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
347 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); 356 section_size =
357 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
358 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
359 } else {
360 section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
361 le16_to_cpu(file_sec->word2));
362 section_id = NVM_WORD1_ID_FAMILY_8000(
363 le16_to_cpu(file_sec->word1));
364 }
348 365
349 if (section_size > IWL_MAX_NVM_SECTION_SIZE) { 366 if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
350 IWL_ERR(mvm, "ERROR - section too large (%d)\n", 367 IWL_ERR(mvm, "ERROR - section too large (%d)\n",
@@ -367,7 +384,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
367 break; 384 break;
368 } 385 }
369 386
370 if (WARN(section_id >= NVM_NUM_OF_SECTIONS, 387 if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
371 "Invalid NVM section ID %d\n", section_id)) { 388 "Invalid NVM section ID %d\n", section_id)) {
372 ret = -EINVAL; 389 ret = -EINVAL;
373 break; 390 break;
@@ -414,6 +431,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
414{ 431{
415 int ret, i, section; 432 int ret, i, section;
416 u8 *nvm_buffer, *temp; 433 u8 *nvm_buffer, *temp;
434 int nvm_to_read[NVM_MAX_NUM_SECTIONS];
435 int num_of_sections_to_read;
436
437 if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
438 return -EINVAL;
417 439
418 /* load external NVM if configured */ 440 /* load external NVM if configured */
419 if (iwlwifi_mod_params.nvm_file) { 441 if (iwlwifi_mod_params.nvm_file) {
@@ -422,6 +444,22 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
422 if (ret) 444 if (ret)
423 return ret; 445 return ret;
424 } else { 446 } else {
447 /* list of NVM sections we are allowed/need to read */
448 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
449 nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
450 nvm_to_read[1] = NVM_SECTION_TYPE_SW;
451 nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
452 nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
453 num_of_sections_to_read = 4;
454 } else {
455 nvm_to_read[0] = NVM_SECTION_TYPE_SW;
456 nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
457 nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
458 nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
459 nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
460 num_of_sections_to_read = 5;
461 }
462
425 /* Read From FW NVM */ 463 /* Read From FW NVM */
426 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); 464 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
427 465
@@ -430,7 +468,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
430 GFP_KERNEL); 468 GFP_KERNEL);
431 if (!nvm_buffer) 469 if (!nvm_buffer)
432 return -ENOMEM; 470 return -ENOMEM;
433 for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) { 471 for (i = 0; i < num_of_sections_to_read; i++) {
434 section = nvm_to_read[i]; 472 section = nvm_to_read[i];
435 /* we override the constness for initial read */ 473 /* we override the constness for initial read */
436 ret = iwl_nvm_read_section(mvm, section, nvm_buffer); 474 ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
@@ -446,10 +484,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
446 484
447#ifdef CONFIG_IWLWIFI_DEBUGFS 485#ifdef CONFIG_IWLWIFI_DEBUGFS
448 switch (section) { 486 switch (section) {
449 case NVM_SECTION_TYPE_HW:
450 mvm->nvm_hw_blob.data = temp;
451 mvm->nvm_hw_blob.size = ret;
452 break;
453 case NVM_SECTION_TYPE_SW: 487 case NVM_SECTION_TYPE_SW:
454 mvm->nvm_sw_blob.data = temp; 488 mvm->nvm_sw_blob.data = temp;
455 mvm->nvm_sw_blob.size = ret; 489 mvm->nvm_sw_blob.size = ret;
@@ -463,6 +497,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
463 mvm->nvm_prod_blob.size = ret; 497 mvm->nvm_prod_blob.size = ret;
464 break; 498 break;
465 default: 499 default:
500 if (section == mvm->cfg->nvm_hw_section_num) {
501 mvm->nvm_hw_blob.data = temp;
502 mvm->nvm_hw_blob.size = ret;
503 break;
504 }
466 WARN(1, "section: %d", section); 505 WARN(1, "section: %d", section);
467 } 506 }
468#endif 507#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
new file mode 100644
index 000000000000..9bfb95e89cfb
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c
@@ -0,0 +1,215 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <net/ipv6.h>
64#include <net/addrconf.h>
65#include "mvm.h"
66
67void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
68 struct iwl_wowlan_config_cmd_v2 *cmd)
69{
70 int i;
71
72 /*
73 * For QoS counters, we store the one to use next, so subtract 0x10
74 * since the uCode will add 0x10 *before* using the value while we
75 * increment after using the value (i.e. store the next value to use).
76 */
77 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
78 u16 seq = mvm_ap_sta->tid_data[i].seq_number;
79 seq -= 0x10;
80 cmd->qos_seq[i] = cpu_to_le16(seq);
81 }
82}
83
84int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
85 struct ieee80211_vif *vif,
86 bool disable_offloading,
87 u32 cmd_flags)
88{
89 union {
90 struct iwl_proto_offload_cmd_v1 v1;
91 struct iwl_proto_offload_cmd_v2 v2;
92 struct iwl_proto_offload_cmd_v3_small v3s;
93 struct iwl_proto_offload_cmd_v3_large v3l;
94 } cmd = {};
95 struct iwl_host_cmd hcmd = {
96 .id = PROT_OFFLOAD_CONFIG_CMD,
97 .flags = cmd_flags,
98 .data[0] = &cmd,
99 .dataflags[0] = IWL_HCMD_DFL_DUP,
100 };
101 struct iwl_proto_offload_cmd_common *common;
102 u32 enabled = 0, size;
103 u32 capa_flags = mvm->fw->ucode_capa.flags;
104#if IS_ENABLED(CONFIG_IPV6)
105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
106 int i;
107
108 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
109 capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
110 struct iwl_ns_config *nsc;
111 struct iwl_targ_addr *addrs;
112 int n_nsc, n_addrs;
113 int c;
114
115 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
116 nsc = cmd.v3s.ns_config;
117 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
118 addrs = cmd.v3s.targ_addrs;
119 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
120 } else {
121 nsc = cmd.v3l.ns_config;
122 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
123 addrs = cmd.v3l.targ_addrs;
124 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
125 }
126
127 if (mvmvif->num_target_ipv6_addrs)
128 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
129
130 /*
131 * For each address we have (and that will fit) fill a target
132 * address struct and combine for NS offload structs with the
133 * solicited node addresses.
134 */
135 for (i = 0, c = 0;
136 i < mvmvif->num_target_ipv6_addrs &&
137 i < n_addrs && c < n_nsc; i++) {
138 struct in6_addr solicited_addr;
139 int j;
140
141 addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
142 &solicited_addr);
143 for (j = 0; j < c; j++)
144 if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
145 &solicited_addr) == 0)
146 break;
147 if (j == c)
148 c++;
149 addrs[i].addr = mvmvif->target_ipv6_addrs[i];
150 addrs[i].config_num = cpu_to_le32(j);
151 nsc[j].dest_ipv6_addr = solicited_addr;
152 memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
153 }
154
155 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
156 cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
157 else
158 cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
159 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
160 if (mvmvif->num_target_ipv6_addrs) {
161 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
162 memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
163 }
164
165 BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
166 sizeof(mvmvif->target_ipv6_addrs[0]));
167
168 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
169 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
170 memcpy(cmd.v2.target_ipv6_addr[i],
171 &mvmvif->target_ipv6_addrs[i],
172 sizeof(cmd.v2.target_ipv6_addr[i]));
173 } else {
174 if (mvmvif->num_target_ipv6_addrs) {
175 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
176 memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
177 }
178
179 BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
180 sizeof(mvmvif->target_ipv6_addrs[0]));
181
182 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
183 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
184 memcpy(cmd.v1.target_ipv6_addr[i],
185 &mvmvif->target_ipv6_addrs[i],
186 sizeof(cmd.v1.target_ipv6_addr[i]));
187 }
188#endif
189
190 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
191 common = &cmd.v3s.common;
192 size = sizeof(cmd.v3s);
193 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
194 common = &cmd.v3l.common;
195 size = sizeof(cmd.v3l);
196 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
197 common = &cmd.v2.common;
198 size = sizeof(cmd.v2);
199 } else {
200 common = &cmd.v1.common;
201 size = sizeof(cmd.v1);
202 }
203
204 if (vif->bss_conf.arp_addr_cnt) {
205 enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
206 common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
207 memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
208 }
209
210 if (!disable_offloading)
211 common->enabled = cpu_to_le32(enabled);
212
213 hcmd.len[0] = size;
214 return iwl_mvm_send_cmd(mvm, &hcmd);
215}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a3d43de342d7..9545d7fdd4bf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -61,6 +61,7 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/module.h> 63#include <linux/module.h>
64#include <linux/vmalloc.h>
64#include <net/mac80211.h> 65#include <net/mac80211.h>
65 66
66#include "iwl-notif-wait.h" 67#include "iwl-notif-wait.h"
@@ -78,6 +79,7 @@
78#include "iwl-prph.h" 79#include "iwl-prph.h"
79#include "rs.h" 80#include "rs.h"
80#include "fw-api-scan.h" 81#include "fw-api-scan.h"
82#include "fw-error-dump.h"
81#include "time-event.h" 83#include "time-event.h"
82 84
83/* 85/*
@@ -185,9 +187,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
185 * (PCIe power is lost before PERST# is asserted), causing ME FW 187 * (PCIe power is lost before PERST# is asserted), causing ME FW
186 * to lose ownership and not being able to obtain it back. 188 * to lose ownership and not being able to obtain it back.
187 */ 189 */
188 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, 190 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
189 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, 191 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
190 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); 192 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
193 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
191} 194}
192 195
193struct iwl_rx_handlers { 196struct iwl_rx_handlers {
@@ -219,13 +222,17 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
219 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), 222 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
220 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false), 223 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
221 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), 224 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
225 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
226 iwl_mvm_rx_ant_coupling_notif, true),
222 227
223 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), 228 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
224 229
230 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
231
225 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), 232 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
226 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false), 233 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
227 RX_HANDLER(SCAN_OFFLOAD_COMPLETE, 234 RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
228 iwl_mvm_rx_scan_offload_complete_notif, false), 235 iwl_mvm_rx_scan_offload_complete_notif, true),
229 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results, 236 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
230 false), 237 false),
231 238
@@ -242,7 +249,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
242#undef RX_HANDLER 249#undef RX_HANDLER
243#define CMD(x) [x] = #x 250#define CMD(x) [x] = #x
244 251
245static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { 252static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
246 CMD(MVM_ALIVE), 253 CMD(MVM_ALIVE),
247 CMD(REPLY_ERROR), 254 CMD(REPLY_ERROR),
248 CMD(INIT_COMPLETE_NOTIF), 255 CMD(INIT_COMPLETE_NOTIF),
@@ -284,9 +291,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
284 CMD(BEACON_NOTIFICATION), 291 CMD(BEACON_NOTIFICATION),
285 CMD(BEACON_TEMPLATE_CMD), 292 CMD(BEACON_TEMPLATE_CMD),
286 CMD(STATISTICS_NOTIFICATION), 293 CMD(STATISTICS_NOTIFICATION),
294 CMD(EOSP_NOTIFICATION),
287 CMD(REDUCE_TX_POWER_CMD), 295 CMD(REDUCE_TX_POWER_CMD),
288 CMD(TX_ANT_CONFIGURATION_CMD), 296 CMD(TX_ANT_CONFIGURATION_CMD),
289 CMD(D3_CONFIG_CMD), 297 CMD(D3_CONFIG_CMD),
298 CMD(D0I3_END_CMD),
290 CMD(PROT_OFFLOAD_CONFIG_CMD), 299 CMD(PROT_OFFLOAD_CONFIG_CMD),
291 CMD(OFFLOADS_QUERY_CMD), 300 CMD(OFFLOADS_QUERY_CMD),
292 CMD(REMOTE_WAKE_CONFIG_CMD), 301 CMD(REMOTE_WAKE_CONFIG_CMD),
@@ -309,17 +318,37 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
309 CMD(BT_PROFILE_NOTIFICATION), 318 CMD(BT_PROFILE_NOTIFICATION),
310 CMD(BT_CONFIG), 319 CMD(BT_CONFIG),
311 CMD(MCAST_FILTER_CMD), 320 CMD(MCAST_FILTER_CMD),
321 CMD(BCAST_FILTER_CMD),
312 CMD(REPLY_SF_CFG_CMD), 322 CMD(REPLY_SF_CFG_CMD),
313 CMD(REPLY_BEACON_FILTERING_CMD), 323 CMD(REPLY_BEACON_FILTERING_CMD),
314 CMD(REPLY_THERMAL_MNG_BACKOFF), 324 CMD(REPLY_THERMAL_MNG_BACKOFF),
315 CMD(MAC_PM_POWER_TABLE), 325 CMD(MAC_PM_POWER_TABLE),
316 CMD(BT_COEX_CI), 326 CMD(BT_COEX_CI),
317 CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), 327 CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
328 CMD(ANTENNA_COUPLING_NOTIFICATION),
318}; 329};
319#undef CMD 330#undef CMD
320 331
321/* this forward declaration can avoid to export the function */ 332/* this forward declaration can avoid to export the function */
322static void iwl_mvm_async_handlers_wk(struct work_struct *wk); 333static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
334static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
335
336static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
337{
338 const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
339
340 if (!pwr_tx_backoff)
341 return 0;
342
343 while (pwr_tx_backoff->pwr) {
344 if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
345 return pwr_tx_backoff->backoff;
346
347 pwr_tx_backoff++;
348 }
349
350 return 0;
351}
323 352
324static struct iwl_op_mode * 353static struct iwl_op_mode *
325iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, 354iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
@@ -333,6 +362,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
333 TX_CMD, 362 TX_CMD,
334 }; 363 };
335 int err, scan_size; 364 int err, scan_size;
365 u32 min_backoff;
366
367 /*
368 * We use IWL_MVM_STATION_COUNT to check the validity of the station
369 * index all over the driver - check that its value corresponds to the
370 * array size.
371 */
372 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
336 373
337 /******************************** 374 /********************************
338 * 1. Allocating and configuring HW data 375 * 1. Allocating and configuring HW data
@@ -373,6 +410,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
373 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); 410 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
374 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); 411 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
375 INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); 412 INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
413 INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
414
415 spin_lock_init(&mvm->d0i3_tx_lock);
416 skb_queue_head_init(&mvm->d0i3_tx);
417 init_waitqueue_head(&mvm->d0i3_exit_waitq);
376 418
377 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); 419 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
378 420
@@ -421,7 +463,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
421 IWL_INFO(mvm, "Detected %s, REV=0x%X\n", 463 IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
422 mvm->cfg->name, mvm->trans->hw_rev); 464 mvm->cfg->name, mvm->trans->hw_rev);
423 465
424 iwl_mvm_tt_initialize(mvm); 466 min_backoff = calc_min_backoff(trans, cfg);
467 iwl_mvm_tt_initialize(mvm, min_backoff);
425 468
426 /* 469 /*
427 * If the NVM exists in an external file, 470 * If the NVM exists in an external file,
@@ -462,13 +505,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
462 if (err) 505 if (err)
463 goto out_unregister; 506 goto out_unregister;
464 507
465 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
466 mvm->pm_ops = &pm_mac_ops;
467 else
468 mvm->pm_ops = &pm_legacy_ops;
469
470 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); 508 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
471 509
510 /* rpm starts with a taken ref. only set the appropriate bit here. */
511 set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
512
472 return op_mode; 513 return op_mode;
473 514
474 out_unregister: 515 out_unregister:
@@ -495,6 +536,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
495 ieee80211_unregister_hw(mvm->hw); 536 ieee80211_unregister_hw(mvm->hw);
496 537
497 kfree(mvm->scan_cmd); 538 kfree(mvm->scan_cmd);
539 vfree(mvm->fw_error_dump);
540 kfree(mvm->fw_error_sram);
498 kfree(mvm->mcast_filter_cmd); 541 kfree(mvm->mcast_filter_cmd);
499 mvm->mcast_filter_cmd = NULL; 542 mvm->mcast_filter_cmd = NULL;
500 543
@@ -508,7 +551,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
508 mvm->phy_db = NULL; 551 mvm->phy_db = NULL;
509 552
510 iwl_free_nvm_data(mvm->nvm_data); 553 iwl_free_nvm_data(mvm->nvm_data);
511 for (i = 0; i < NVM_NUM_OF_SECTIONS; i++) 554 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
512 kfree(mvm->nvm_sections[i].data); 555 kfree(mvm->nvm_sections[i].data);
513 556
514 ieee80211_free_hw(mvm->hw); 557 ieee80211_free_hw(mvm->hw);
@@ -658,7 +701,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
658 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); 701 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
659} 702}
660 703
661static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 704static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
662{ 705{
663 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 706 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
664 707
@@ -667,9 +710,9 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
667 else 710 else
668 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); 711 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
669 712
670 if (state && mvm->cur_ucode != IWL_UCODE_INIT)
671 iwl_trans_stop_device(mvm->trans);
672 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); 713 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
714
715 return state && mvm->cur_ucode != IWL_UCODE_INIT;
673} 716}
674 717
675static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 718static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@@ -703,6 +746,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
703 iwl_abort_notification_waits(&mvm->notif_wait); 746 iwl_abort_notification_waits(&mvm->notif_wait);
704 747
705 /* 748 /*
749 * This is a bit racy, but worst case we tell mac80211 about
750 * a stopped/aborted scan when that was already done which
751 * is not a problem. It is necessary to abort any os scan
752 * here because mac80211 requires having the scan cleared
753 * before restarting.
754 * We'll reset the scan_status to NONE in restart cleanup in
755 * the next start() call from mac80211. If restart isn't called
756 * (no fw restart) scan status will stay busy.
757 */
758 switch (mvm->scan_status) {
759 case IWL_MVM_SCAN_NONE:
760 break;
761 case IWL_MVM_SCAN_OS:
762 ieee80211_scan_completed(mvm->hw, true);
763 break;
764 case IWL_MVM_SCAN_SCHED:
765 /* Sched scan will be restarted by mac80211 in restart_hw. */
766 if (!mvm->restart_fw)
767 ieee80211_sched_scan_stopped(mvm->hw);
768 break;
769 }
770
771 /*
706 * If we're restarting already, don't cycle restarts. 772 * If we're restarting already, don't cycle restarts.
707 * If INIT fw asserted, it will likely fail again. 773 * If INIT fw asserted, it will likely fail again.
708 * If WoWLAN fw asserted, don't restart either, mac80211 774 * If WoWLAN fw asserted, don't restart either, mac80211
@@ -733,25 +799,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
733 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); 799 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
734 schedule_work(&reprobe->work); 800 schedule_work(&reprobe->work);
735 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) { 801 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
736 /* 802 /* don't let the transport/FW power down */
737 * This is a bit racy, but worst case we tell mac80211 about 803 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
738 * a stopped/aborted (sched) scan when that was already done
739 * which is not a problem. It is necessary to abort any scan
740 * here because mac80211 requires having the scan cleared
741 * before restarting.
742 * We'll reset the scan_status to NONE in restart cleanup in
743 * the next start() call from mac80211.
744 */
745 switch (mvm->scan_status) {
746 case IWL_MVM_SCAN_NONE:
747 break;
748 case IWL_MVM_SCAN_OS:
749 ieee80211_scan_completed(mvm->hw, true);
750 break;
751 case IWL_MVM_SCAN_SCHED:
752 ieee80211_sched_scan_stopped(mvm->hw);
753 break;
754 }
755 804
756 if (mvm->restart_fw > 0) 805 if (mvm->restart_fw > 0)
757 mvm->restart_fw--; 806 mvm->restart_fw--;
@@ -759,13 +808,52 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
759 } 808 }
760} 809}
761 810
811#ifdef CONFIG_IWLWIFI_DEBUGFS
812void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
813{
814 struct iwl_fw_error_dump_file *dump_file;
815 struct iwl_fw_error_dump_data *dump_data;
816 u32 file_len;
817
818 lockdep_assert_held(&mvm->mutex);
819
820 if (mvm->fw_error_dump)
821 return;
822
823 file_len = mvm->fw_error_sram_len +
824 sizeof(*dump_file) +
825 sizeof(*dump_data);
826
827 dump_file = vmalloc(file_len);
828 if (!dump_file)
829 return;
830
831 mvm->fw_error_dump = dump_file;
832
833 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
834 dump_file->file_len = cpu_to_le32(file_len);
835 dump_data = (void *)dump_file->data;
836 dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
837 dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
838
839 /*
840 * No need for lock since at the stage the FW isn't loaded. So it
841 * can't assert - we are the only one who can possibly be accessing
842 * mvm->fw_error_sram right now.
843 */
844 memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
845}
846#endif
847
762static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) 848static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
763{ 849{
764 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 850 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
765 851
766 iwl_mvm_dump_nic_error_log(mvm); 852 iwl_mvm_dump_nic_error_log(mvm);
767 if (!mvm->restart_fw) 853
768 iwl_mvm_dump_sram(mvm); 854#ifdef CONFIG_IWLWIFI_DEBUGFS
855 iwl_mvm_fw_error_sram_dump(mvm);
856#endif
769 857
770 iwl_mvm_nic_restart(mvm); 858 iwl_mvm_nic_restart(mvm);
771} 859}
@@ -778,6 +866,323 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
778 iwl_mvm_nic_restart(mvm); 866 iwl_mvm_nic_restart(mvm);
779} 867}
780 868
869struct iwl_d0i3_iter_data {
870 struct iwl_mvm *mvm;
871 u8 ap_sta_id;
872 u8 vif_count;
873 u8 offloading_tid;
874 bool disable_offloading;
875};
876
877static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
878 struct ieee80211_vif *vif,
879 struct iwl_d0i3_iter_data *iter_data)
880{
881 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
882 struct ieee80211_sta *ap_sta;
883 struct iwl_mvm_sta *mvmsta;
884 u32 available_tids = 0;
885 u8 tid;
886
887 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
888 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
889 return false;
890
891 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
892 if (IS_ERR_OR_NULL(ap_sta))
893 return false;
894
895 mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
896 spin_lock_bh(&mvmsta->lock);
897 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
898 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
899
900 /*
901 * in case of pending tx packets, don't use this tid
902 * for offloading in order to prevent reuse of the same
903 * qos seq counters.
904 */
905 if (iwl_mvm_tid_queued(tid_data))
906 continue;
907
908 if (tid_data->state != IWL_AGG_OFF)
909 continue;
910
911 available_tids |= BIT(tid);
912 }
913 spin_unlock_bh(&mvmsta->lock);
914
915 /*
916 * disallow protocol offloading if we have no available tid
917 * (with no pending frames and no active aggregation,
918 * as we don't handle "holes" properly - the scheduler needs the
919 * frame's seq number and TFD index to match)
920 */
921 if (!available_tids)
922 return true;
923
924 /* for simplicity, just use the first available tid */
925 iter_data->offloading_tid = ffs(available_tids) - 1;
926 return false;
927}
928
929static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
930 struct ieee80211_vif *vif)
931{
932 struct iwl_d0i3_iter_data *data = _data;
933 struct iwl_mvm *mvm = data->mvm;
934 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
935 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
936
937 IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
938 if (vif->type != NL80211_IFTYPE_STATION ||
939 !vif->bss_conf.assoc)
940 return;
941
942 /*
943 * in case of pending tx packets or active aggregations,
944 * avoid offloading features in order to prevent reuse of
945 * the same qos seq counters.
946 */
947 if (iwl_mvm_disallow_offloading(mvm, vif, data))
948 data->disable_offloading = true;
949
950 iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
951 iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
952
953 /*
954 * on init/association, mvm already configures POWER_TABLE_CMD
955 * and REPLY_MCAST_FILTER_CMD, so currently don't
956 * reconfigure them (we might want to use different
957 * params later on, though).
958 */
959 data->ap_sta_id = mvmvif->ap_sta_id;
960 data->vif_count++;
961}
962
963static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
964 struct iwl_wowlan_config_cmd_v3 *cmd,
965 struct iwl_d0i3_iter_data *iter_data)
966{
967 struct ieee80211_sta *ap_sta;
968 struct iwl_mvm_sta *mvm_ap_sta;
969
970 if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
971 return;
972
973 rcu_read_lock();
974
975 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
976 if (IS_ERR_OR_NULL(ap_sta))
977 goto out;
978
979 mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
980 cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
981 cmd->offloading_tid = iter_data->offloading_tid;
982
983 /*
984 * The d0i3 uCode takes care of the nonqos counters,
985 * so configure only the qos seq ones.
986 */
987 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
988out:
989 rcu_read_unlock();
990}
991static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
992{
993 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
994 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
995 int ret;
996 struct iwl_d0i3_iter_data d0i3_iter_data = {
997 .mvm = mvm,
998 };
999 struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
1000 .common = {
1001 .wakeup_filter =
1002 cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
1003 IWL_WOWLAN_WAKEUP_BEACON_MISS |
1004 IWL_WOWLAN_WAKEUP_LINK_CHANGE |
1005 IWL_WOWLAN_WAKEUP_BCN_FILTERING),
1006 },
1007 };
1008 struct iwl_d3_manager_config d3_cfg_cmd = {
1009 .min_sleep_time = cpu_to_le32(1000),
1010 };
1011
1012 IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
1013
1014 /* make sure we have no running tx while configuring the qos */
1015 set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1016 synchronize_net();
1017
1018 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1019 IEEE80211_IFACE_ITER_NORMAL,
1020 iwl_mvm_enter_d0i3_iterator,
1021 &d0i3_iter_data);
1022 if (d0i3_iter_data.vif_count == 1) {
1023 mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
1024 mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
1025 } else {
1026 WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
1027 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1028 mvm->d0i3_offloading = false;
1029 }
1030
1031 iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
1032 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
1033 sizeof(wowlan_config_cmd),
1034 &wowlan_config_cmd);
1035 if (ret)
1036 return ret;
1037
1038 return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
1039 flags | CMD_MAKE_TRANS_IDLE,
1040 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1041}
1042
1043static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
1044 struct ieee80211_vif *vif)
1045{
1046 struct iwl_mvm *mvm = _data;
1047 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
1048
1049 IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
1050 if (vif->type != NL80211_IFTYPE_STATION ||
1051 !vif->bss_conf.assoc)
1052 return;
1053
1054 iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
1055}
1056
1057static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
1058 struct ieee80211_vif *vif)
1059{
1060 struct iwl_mvm *mvm = data;
1061 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1062
1063 if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
1064 mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1065 ieee80211_connection_loss(vif);
1066}
1067
/*
 * Re-enable Tx after D0i3 exit.
 *
 * If QoS offloading was active, update the per-TID sequence numbers of
 * the AP station from the firmware-reported values in @qos_seq, then
 * re-enqueue (or drop, if no valid station remains) every frame that was
 * buffered on mvm->d0i3_tx while the device was in D0i3, and finally
 * clear the IN_D0I3 state and wake the mac80211 queues.
 *
 * @qos_seq may be NULL when the wakeup status could not be retrieved;
 * in that case the sequence-number update is skipped.
 *
 * Must be called with mvm->mutex held (asserted below); the Tx queue
 * itself is protected by mvm->d0i3_tx_lock.
 */
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	/* no AP station was tracked - just drain the queue below */
	if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	/* station may have been removed meanwhile - frames will be dropped */
	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		/* without a station (or on Tx failure) the frame is freed */
		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}
1123
1124static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1125{
1126 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1127 struct iwl_host_cmd get_status_cmd = {
1128 .id = WOWLAN_GET_STATUSES,
1129 .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
1130 };
1131 struct iwl_wowlan_status_v6 *status;
1132 int ret;
1133 u32 disconnection_reasons, wakeup_reasons;
1134 __le16 *qos_seq = NULL;
1135
1136 mutex_lock(&mvm->mutex);
1137 ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
1138 if (ret)
1139 goto out;
1140
1141 if (!get_status_cmd.resp_pkt)
1142 goto out;
1143
1144 status = (void *)get_status_cmd.resp_pkt->data;
1145 wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
1146 qos_seq = status->qos_seq_ctr;
1147
1148 IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
1149
1150 disconnection_reasons =
1151 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1152 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1153 if (wakeup_reasons & disconnection_reasons)
1154 ieee80211_iterate_active_interfaces(
1155 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1156 iwl_mvm_d0i3_disconnect_iter, mvm);
1157
1158 iwl_free_resp(&get_status_cmd);
1159out:
1160 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1161 mutex_unlock(&mvm->mutex);
1162}
1163
1164static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1165{
1166 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1167 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1168 CMD_WAKE_UP_TRANS;
1169 int ret;
1170
1171 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1172
1173 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1174 if (ret)
1175 goto out;
1176
1177 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1178 IEEE80211_IFACE_ITER_NORMAL,
1179 iwl_mvm_exit_d0i3_iterator,
1180 mvm);
1181out:
1182 schedule_work(&mvm->d0i3_exit_work);
1183 return ret;
1184}
1185
781static const struct iwl_op_mode_ops iwl_mvm_ops = { 1186static const struct iwl_op_mode_ops iwl_mvm_ops = {
782 .start = iwl_op_mode_mvm_start, 1187 .start = iwl_op_mode_mvm_start,
783 .stop = iwl_op_mode_mvm_stop, 1188 .stop = iwl_op_mode_mvm_stop,
@@ -789,4 +1194,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
789 .nic_error = iwl_mvm_nic_error, 1194 .nic_error = iwl_mvm_nic_error,
790 .cmd_queue_full = iwl_mvm_cmd_queue_full, 1195 .cmd_queue_full = iwl_mvm_cmd_queue_full,
791 .nic_config = iwl_mvm_nic_config, 1196 .nic_config = iwl_mvm_nic_config,
1197 .enter_d0i3 = iwl_mvm_enter_d0i3,
1198 .exit_d0i3 = iwl_mvm_exit_d0i3,
792}; 1199};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index b7268c0b3333..237efe0ac1c4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,13 +156,13 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
156 idle_cnt = chains_static; 156 idle_cnt = chains_static;
157 active_cnt = chains_dynamic; 157 active_cnt = chains_dynamic;
158 158
159 cmd->rxchain_info = cpu_to_le32(iwl_fw_valid_rx_ant(mvm->fw) << 159 cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
160 PHY_RX_CHAIN_VALID_POS); 160 PHY_RX_CHAIN_VALID_POS);
161 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); 161 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
162 cmd->rxchain_info |= cpu_to_le32(active_cnt << 162 cmd->rxchain_info |= cpu_to_le32(active_cnt <<
163 PHY_RX_CHAIN_MIMO_CNT_POS); 163 PHY_RX_CHAIN_MIMO_CNT_POS);
164 164
165 cmd->txchain_info = cpu_to_le32(iwl_fw_valid_tx_ant(mvm->fw)); 165 cmd->txchain_info = cpu_to_le32(mvm->fw->valid_tx_ant);
166} 166}
167 167
168/* 168/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d9eab3b7bb9f..6b636eab3339 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -74,39 +74,36 @@
74 74
75#define POWER_KEEP_ALIVE_PERIOD_SEC 25 75#define POWER_KEEP_ALIVE_PERIOD_SEC 25
76 76
77static
77int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, 78int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
78 struct iwl_beacon_filter_cmd *cmd) 79 struct iwl_beacon_filter_cmd *cmd,
80 u32 flags)
79{ 81{
80 int ret; 82 IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
81 83 le32_to_cpu(cmd->ba_enable_beacon_abort));
82 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC, 84 IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
83 sizeof(struct iwl_beacon_filter_cmd), cmd); 85 le32_to_cpu(cmd->ba_escape_timer));
84 86 IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
85 if (!ret) { 87 le32_to_cpu(cmd->bf_debug_flag));
86 IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", 88 IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
87 le32_to_cpu(cmd->ba_enable_beacon_abort)); 89 le32_to_cpu(cmd->bf_enable_beacon_filter));
88 IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", 90 IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
89 le32_to_cpu(cmd->ba_escape_timer)); 91 le32_to_cpu(cmd->bf_energy_delta));
90 IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", 92 IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
91 le32_to_cpu(cmd->bf_debug_flag)); 93 le32_to_cpu(cmd->bf_escape_timer));
92 IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", 94 IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
93 le32_to_cpu(cmd->bf_enable_beacon_filter)); 95 le32_to_cpu(cmd->bf_roaming_energy_delta));
94 IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", 96 IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
95 le32_to_cpu(cmd->bf_energy_delta)); 97 le32_to_cpu(cmd->bf_roaming_state));
96 IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", 98 IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
97 le32_to_cpu(cmd->bf_escape_timer)); 99 le32_to_cpu(cmd->bf_temp_threshold));
98 IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", 100 IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
99 le32_to_cpu(cmd->bf_roaming_energy_delta)); 101 le32_to_cpu(cmd->bf_temp_fast_filter));
100 IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", 102 IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
101 le32_to_cpu(cmd->bf_roaming_state)); 103 le32_to_cpu(cmd->bf_temp_slow_filter));
102 IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", 104
103 le32_to_cpu(cmd->bf_temp_threshold)); 105 return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
104 IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", 106 sizeof(struct iwl_beacon_filter_cmd), cmd);
105 le32_to_cpu(cmd->bf_temp_fast_filter));
106 IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
107 le32_to_cpu(cmd->bf_temp_slow_filter));
108 }
109 return ret;
110} 107}
111 108
112static 109static
@@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
145 mvmvif->bf_data.ba_enabled = enable; 142 mvmvif->bf_data.ba_enabled = enable;
146 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd); 143 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
147 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); 144 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
148 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); 145 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
149} 146}
150 147
151static void iwl_mvm_power_log(struct iwl_mvm *mvm, 148static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
301 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); 298 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
302 cmd->keep_alive_seconds = cpu_to_le16(keep_alive); 299 cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
303 300
304 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM || 301 if (mvm->ps_disabled)
305 mvm->ps_prevented)
306 return; 302 return;
307 303
308 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 304 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
312 mvmvif->dbgfs_pm.disable_power_off) 308 mvmvif->dbgfs_pm.disable_power_off)
313 cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); 309 cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
314#endif 310#endif
315 if (!vif->bss_conf.ps || mvmvif->pm_prevented) 311 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
312 mvm->pm_disabled)
316 return; 313 return;
317 314
318 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); 315 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -419,72 +416,44 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
419#endif /* CONFIG_IWLWIFI_DEBUGFS */ 416#endif /* CONFIG_IWLWIFI_DEBUGFS */
420} 417}
421 418
422static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm, 419static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
423 struct ieee80211_vif *vif) 420 struct ieee80211_vif *vif)
424{ 421{
425 int ret;
426 bool ba_enable;
427 struct iwl_mac_power_cmd cmd = {}; 422 struct iwl_mac_power_cmd cmd = {};
428 423
429 if (vif->type != NL80211_IFTYPE_STATION) 424 if (vif->type != NL80211_IFTYPE_STATION)
430 return 0; 425 return 0;
431 426
432 if (vif->p2p && 427 if (vif->p2p &&
433 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)) 428 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
434 return 0; 429 return 0;
435 430
436 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 431 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
437 iwl_mvm_power_log(mvm, &cmd); 432 iwl_mvm_power_log(mvm, &cmd);
438
439 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
440 sizeof(cmd), &cmd);
441 if (ret)
442 return ret;
443
444 ba_enable = !!(cmd.flags &
445 cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
446
447 return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
448}
449
450static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
451 struct ieee80211_vif *vif)
452{
453 struct iwl_mac_power_cmd cmd = {};
454 struct iwl_mvm_vif *mvmvif __maybe_unused =
455 iwl_mvm_vif_from_mac80211(vif);
456
457 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
458 return 0;
459
460 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
461 mvmvif->color));
462
463 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
464 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
465
466#ifdef CONFIG_IWLWIFI_DEBUGFS 433#ifdef CONFIG_IWLWIFI_DEBUGFS
467 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && 434 memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
468 mvmvif->dbgfs_pm.disable_power_off)
469 cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
470#endif 435#endif
471 iwl_mvm_power_log(mvm, &cmd);
472 436
473 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC, 437 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
474 sizeof(cmd), &cmd); 438 sizeof(cmd), &cmd);
475} 439}
476 440
477static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable) 441int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
478{ 442{
479 struct iwl_device_power_cmd cmd = { 443 struct iwl_device_power_cmd cmd = {
480 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK), 444 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
481 }; 445 };
482 446
447 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
448 return 0;
449
483 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) 450 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
484 return 0; 451 return 0;
485 452
486 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM || 453 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
487 force_disable) 454 mvm->ps_disabled = true;
455
456 if (mvm->ps_disabled)
488 cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK); 457 cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
489 458
490#ifdef CONFIG_IWLWIFI_DEBUGFS 459#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
501 &cmd); 470 &cmd);
502} 471}
503 472
504static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
505{
506 return _iwl_mvm_power_update_device(mvm, false);
507}
508
509void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 473void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
510{ 474{
511 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 475 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -544,44 +508,176 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
544 return 0; 508 return 0;
545} 509}
546 510
547static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac, 511struct iwl_power_constraint {
548 struct ieee80211_vif *vif) 512 struct ieee80211_vif *bf_vif;
513 struct ieee80211_vif *bss_vif;
514 struct ieee80211_vif *p2p_vif;
515 u16 bss_phyctx_id;
516 u16 p2p_phyctx_id;
517 bool pm_disabled;
518 bool ps_disabled;
519 struct iwl_mvm *mvm;
520};
521
522static void iwl_mvm_power_iterator(void *_data, u8 *mac,
523 struct ieee80211_vif *vif)
549{ 524{
550 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
551 struct iwl_mvm *mvm = _data; 526 struct iwl_power_constraint *power_iterator = _data;
552 int ret; 527 struct iwl_mvm *mvm = power_iterator->mvm;
528
529 switch (ieee80211_vif_type_p2p(vif)) {
530 case NL80211_IFTYPE_P2P_DEVICE:
531 break;
532
533 case NL80211_IFTYPE_P2P_GO:
534 case NL80211_IFTYPE_AP:
535 /* no BSS power mgmt if we have an active AP */
536 if (mvmvif->ap_ibss_active)
537 power_iterator->pm_disabled = true;
538 break;
539
540 case NL80211_IFTYPE_MONITOR:
541 /* no BSS power mgmt and no device power save */
542 power_iterator->pm_disabled = true;
543 power_iterator->ps_disabled = true;
544 break;
545
546 case NL80211_IFTYPE_P2P_CLIENT:
547 if (mvmvif->phy_ctxt)
548 power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
549
550 /* we should have only one P2P vif */
551 WARN_ON(power_iterator->p2p_vif);
552 power_iterator->p2p_vif = vif;
553
554 IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
555 power_iterator->p2p_phyctx_id,
556 power_iterator->bss_phyctx_id);
557 if (!(mvm->fw->ucode_capa.flags &
558 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
559 /* no BSS power mgmt if we have a P2P client*/
560 power_iterator->pm_disabled = true;
561 } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
562 power_iterator->bss_phyctx_id < MAX_PHYS &&
563 power_iterator->p2p_phyctx_id ==
564 power_iterator->bss_phyctx_id) {
565 power_iterator->pm_disabled = true;
566 }
567 break;
568
569 case NL80211_IFTYPE_STATION:
570 if (mvmvif->phy_ctxt)
571 power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
572
573 /* we should have only one BSS vif */
574 WARN_ON(power_iterator->bss_vif);
575 power_iterator->bss_vif = vif;
576
577 if (mvmvif->bf_data.bf_enabled &&
578 !WARN_ON(power_iterator->bf_vif))
579 power_iterator->bf_vif = vif;
580
581 IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
582 power_iterator->p2p_phyctx_id,
583 power_iterator->bss_phyctx_id);
584 if (mvm->fw->ucode_capa.flags &
585 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
586 (power_iterator->p2p_phyctx_id < MAX_PHYS &&
587 power_iterator->bss_phyctx_id < MAX_PHYS &&
588 power_iterator->p2p_phyctx_id ==
589 power_iterator->bss_phyctx_id))
590 power_iterator->pm_disabled = true;
591 break;
592
593 default:
594 break;
595 }
596}
553 597
554 mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true; 598static void
599iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
600 struct iwl_power_constraint *constraint)
601{
602 lockdep_assert_held(&mvm->mutex);
603
604 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
605 constraint->pm_disabled = true;
606 constraint->ps_disabled = true;
607 }
555 608
556 ret = iwl_mvm_power_mac_update_mode(mvm, vif); 609 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
557 WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n"); 610 IEEE80211_IFACE_ITER_NORMAL,
611 iwl_mvm_power_iterator, constraint);
558} 612}
559 613
560static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm, 614int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
561 struct ieee80211_vif *vif,
562 bool assign)
563{ 615{
616 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
617 struct iwl_power_constraint constraint = {
618 .p2p_phyctx_id = MAX_PHYS,
619 .bss_phyctx_id = MAX_PHYS,
620 .mvm = mvm,
621 };
622 bool ba_enable;
623 int ret;
624
625 lockdep_assert_held(&mvm->mutex);
626
627 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
628 return 0;
629
630 iwl_mvm_power_get_global_constraint(mvm, &constraint);
631 mvm->ps_disabled = constraint.ps_disabled;
632 mvm->pm_disabled = constraint.pm_disabled;
633
634 /* don't update device power state unless we add / remove monitor */
564 if (vif->type == NL80211_IFTYPE_MONITOR) { 635 if (vif->type == NL80211_IFTYPE_MONITOR) {
565 int ret = _iwl_mvm_power_update_device(mvm, assign); 636 ret = iwl_mvm_power_update_device(mvm);
566 mvm->ps_prevented = assign; 637 if (ret)
567 WARN_ONCE(ret, "Failed to update power device state\n"); 638 return ret;
568 } 639 }
569 640
570 ieee80211_iterate_active_interfaces(mvm->hw, 641 if (constraint.bss_vif) {
571 IEEE80211_IFACE_ITER_NORMAL, 642 ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
572 iwl_mvm_power_binding_iterator, 643 if (ret)
573 mvm); 644 return ret;
645 }
646
647 if (constraint.p2p_vif) {
648 ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
649 if (ret)
650 return ret;
651 }
652
653 if (!constraint.bf_vif)
654 return 0;
655
656 vif = constraint.bf_vif;
657 mvmvif = iwl_mvm_vif_from_mac80211(vif);
658
659 ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
660 !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
661
662 return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
574} 663}
575 664
576#ifdef CONFIG_IWLWIFI_DEBUGFS 665#ifdef CONFIG_IWLWIFI_DEBUGFS
577static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, 666int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
578 struct ieee80211_vif *vif, char *buf, 667 struct ieee80211_vif *vif, char *buf,
579 int bufsz) 668 int bufsz)
580{ 669{
670 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
581 struct iwl_mac_power_cmd cmd = {}; 671 struct iwl_mac_power_cmd cmd = {};
582 int pos = 0; 672 int pos = 0;
583 673
584 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 674 if (WARN_ON(!(mvm->fw->ucode_capa.flags &
675 IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
676 return 0;
677
678 mutex_lock(&mvm->mutex);
679 memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
680 mutex_unlock(&mvm->mutex);
585 681
586 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) 682 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
587 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", 683 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@@ -685,32 +781,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
685} 781}
686#endif 782#endif
687 783
688int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, 784static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
689 struct ieee80211_vif *vif) 785 struct ieee80211_vif *vif,
786 struct iwl_beacon_filter_cmd *cmd,
787 u32 cmd_flags,
788 bool d0i3)
690{ 789{
691 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 790 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
692 struct iwl_beacon_filter_cmd cmd = {
693 IWL_BF_CMD_CONFIG_DEFAULTS,
694 .bf_enable_beacon_filter = cpu_to_le32(1),
695 };
696 int ret; 791 int ret;
697 792
698 if (mvmvif != mvm->bf_allowed_vif || 793 if (mvmvif != mvm->bf_allowed_vif ||
699 vif->type != NL80211_IFTYPE_STATION || vif->p2p) 794 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
700 return 0; 795 return 0;
701 796
702 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd); 797 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
703 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); 798 if (!d0i3)
704 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); 799 iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
800 ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
705 801
706 if (!ret) 802 /* don't change bf_enabled in case of temporary d0i3 configuration */
803 if (!ret && !d0i3)
707 mvmvif->bf_data.bf_enabled = true; 804 mvmvif->bf_data.bf_enabled = true;
708 805
709 return ret; 806 return ret;
710} 807}
711 808
809int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
810 struct ieee80211_vif *vif,
811 u32 flags)
812{
813 struct iwl_beacon_filter_cmd cmd = {
814 IWL_BF_CMD_CONFIG_DEFAULTS,
815 .bf_enable_beacon_filter = cpu_to_le32(1),
816 };
817
818 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
819}
820
712int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 821int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
713 struct ieee80211_vif *vif) 822 struct ieee80211_vif *vif,
823 u32 flags)
714{ 824{
715 struct iwl_beacon_filter_cmd cmd = {}; 825 struct iwl_beacon_filter_cmd cmd = {};
716 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 826 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -720,7 +830,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
720 vif->type != NL80211_IFTYPE_STATION || vif->p2p) 830 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
721 return 0; 831 return 0;
722 832
723 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); 833 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
724 834
725 if (!ret) 835 if (!ret)
726 mvmvif->bf_data.bf_enabled = false; 836 mvmvif->bf_data.bf_enabled = false;
@@ -728,23 +838,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
728 return ret; 838 return ret;
729} 839}
730 840
731int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm, 841int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
732 struct ieee80211_vif *vif) 842 struct ieee80211_vif *vif,
843 bool enable, u32 flags)
733{ 844{
845 int ret;
734 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 846 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
847 struct iwl_mac_power_cmd cmd = {};
735 848
736 if (!mvmvif->bf_data.bf_enabled) 849 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
737 return 0; 850 return 0;
738 851
739 return iwl_mvm_enable_beacon_filter(mvm, vif); 852 if (!vif->bss_conf.assoc)
740} 853 return 0;
741 854
742const struct iwl_mvm_power_ops pm_mac_ops = { 855 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
743 .power_update_mode = iwl_mvm_power_mac_update_mode, 856 if (enable) {
744 .power_update_device_mode = iwl_mvm_power_update_device, 857 /* configure skip over dtim up to 300 msec */
745 .power_disable = iwl_mvm_power_mac_disable, 858 int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
746 .power_update_binding = _iwl_mvm_power_update_binding, 859 int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
860
861 if (WARN_ON(!dtimper_msec))
862 return 0;
863
864 cmd.flags |=
865 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
866 cmd.skip_dtim_periods = 300 / dtimper_msec;
867 }
868 iwl_mvm_power_log(mvm, &cmd);
747#ifdef CONFIG_IWLWIFI_DEBUGFS 869#ifdef CONFIG_IWLWIFI_DEBUGFS
748 .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read, 870 memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
749#endif 871#endif
750}; 872 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
873 sizeof(cmd), &cmd);
874 if (ret)
875 return ret;
876
877 /* configure beacon filtering */
878 if (mvmvif != mvm->bf_allowed_vif)
879 return 0;
880
881 if (enable) {
882 struct iwl_beacon_filter_cmd cmd_bf = {
883 IWL_BF_CMD_CONFIG_D0I3,
884 .bf_enable_beacon_filter = cpu_to_le32(1),
885 };
886 ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
887 flags, true);
888 } else {
889 if (mvmvif->bf_data.bf_enabled)
890 ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
891 else
892 ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
893 }
894
895 return ret;
896}
897
898int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
899 struct ieee80211_vif *vif,
900 bool force,
901 u32 flags)
902{
903 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
904
905 if (mvmvif != mvm->bf_allowed_vif)
906 return 0;
907
908 if (!mvmvif->bf_data.bf_enabled) {
909 /* disable beacon filtering explicitly if force is true */
910 if (force)
911 return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
912 return 0;
913 }
914
915 return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
916}
917
918int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
919{
920 struct iwl_powertable_cmd cmd = {
921 .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
922 };
923
924 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
925 sizeof(cmd), &cmd);
926}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
deleted file mode 100644
index ef712ae5bc62..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ /dev/null
@@ -1,319 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-debug.h"
72#include "mvm.h"
73#include "iwl-modparams.h"
74#include "fw-api-power.h"
75
76#define POWER_KEEP_ALIVE_PERIOD_SEC 25
77
78static void iwl_mvm_power_log(struct iwl_mvm *mvm,
79 struct iwl_powertable_cmd *cmd)
80{
81 IWL_DEBUG_POWER(mvm,
82 "Sending power table command for power level %d, flags = 0x%X\n",
83 iwlmvm_mod_params.power_scheme,
84 le16_to_cpu(cmd->flags));
85 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
86
87 if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
88 IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
89 le32_to_cpu(cmd->rx_data_timeout));
90 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
91 le32_to_cpu(cmd->tx_data_timeout));
92 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
93 IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
94 le32_to_cpu(cmd->skip_dtim_periods));
95 if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
96 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
97 le32_to_cpu(cmd->lprx_rssi_threshold));
98 }
99}
100
101static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
102 struct ieee80211_vif *vif,
103 struct iwl_powertable_cmd *cmd)
104{
105 struct ieee80211_hw *hw = mvm->hw;
106 struct ieee80211_chanctx_conf *chanctx_conf;
107 struct ieee80211_channel *chan;
108 int dtimper, dtimper_msec;
109 int keep_alive;
110 bool radar_detect = false;
111 struct iwl_mvm_vif *mvmvif __maybe_unused =
112 iwl_mvm_vif_from_mac80211(vif);
113
114 /*
115 * Regardless of power management state the driver must set
116 * keep alive period. FW will use it for sending keep alive NDPs
117 * immediately after association.
118 */
119 cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
120
121 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
122 return;
123
124 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
125 if (!vif->bss_conf.assoc)
126 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
127
128#ifdef CONFIG_IWLWIFI_DEBUGFS
129 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
130 mvmvif->dbgfs_pm.disable_power_off)
131 cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
132#endif
133 if (!vif->bss_conf.ps)
134 return;
135
136 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
137
138 if (vif->bss_conf.beacon_rate &&
139 (vif->bss_conf.beacon_rate->bitrate == 10 ||
140 vif->bss_conf.beacon_rate->bitrate == 60)) {
141 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
142 cmd->lprx_rssi_threshold =
143 cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
144 }
145
146 dtimper = hw->conf.ps_dtim_period ?: 1;
147
148 /* Check if radar detection is required on current channel */
149 rcu_read_lock();
150 chanctx_conf = rcu_dereference(vif->chanctx_conf);
151 WARN_ON(!chanctx_conf);
152 if (chanctx_conf) {
153 chan = chanctx_conf->def.chan;
154 radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
155 }
156 rcu_read_unlock();
157
158 /* Check skip over DTIM conditions */
159 if (!radar_detect && (dtimper <= 10) &&
160 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
161 mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
162 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
163 cmd->skip_dtim_periods = cpu_to_le32(3);
164 }
165
166 /* Check that keep alive period is at least 3 * DTIM */
167 dtimper_msec = dtimper * vif->bss_conf.beacon_int;
168 keep_alive = max_t(int, 3 * dtimper_msec,
169 MSEC_PER_SEC * cmd->keep_alive_seconds);
170 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
171 cmd->keep_alive_seconds = keep_alive;
172
173 if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
174 cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
175 cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
176 } else {
177 cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
178 cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
179 }
180
181#ifdef CONFIG_IWLWIFI_DEBUGFS
182 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
183 cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
184 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
185 if (mvmvif->dbgfs_pm.skip_over_dtim)
186 cmd->flags |=
187 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
188 else
189 cmd->flags &=
190 cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
191 }
192 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
193 cmd->rx_data_timeout =
194 cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
195 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
196 cmd->tx_data_timeout =
197 cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
198 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
199 cmd->skip_dtim_periods =
200 cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
201 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
202 if (mvmvif->dbgfs_pm.lprx_ena)
203 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
204 else
205 cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
206 }
207 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
208 cmd->lprx_rssi_threshold =
209 cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
210#endif /* CONFIG_IWLWIFI_DEBUGFS */
211}
212
213static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
214 struct ieee80211_vif *vif)
215{
216 int ret;
217 bool ba_enable;
218 struct iwl_powertable_cmd cmd = {};
219
220 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
221 return 0;
222
223 /*
224 * TODO: The following vif_count verification is temporary condition.
225 * Avoid power mode update if more than one interface is currently
226 * active. Remove this condition when FW will support power management
227 * on multiple MACs.
228 */
229 IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
230 mvm->vif_count);
231 if (mvm->vif_count > 1)
232 return 0;
233
234 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
235 iwl_mvm_power_log(mvm, &cmd);
236
237 ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
238 sizeof(cmd), &cmd);
239 if (ret)
240 return ret;
241
242 ba_enable = !!(cmd.flags &
243 cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
244
245 return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
246}
247
248static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
249 struct ieee80211_vif *vif)
250{
251 struct iwl_powertable_cmd cmd = {};
252 struct iwl_mvm_vif *mvmvif __maybe_unused =
253 iwl_mvm_vif_from_mac80211(vif);
254
255 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
256 return 0;
257
258 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
259 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
260
261#ifdef CONFIG_IWLWIFI_DEBUGFS
262 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
263 mvmvif->dbgfs_pm.disable_power_off)
264 cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
265#endif
266 iwl_mvm_power_log(mvm, &cmd);
267
268 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
269 sizeof(cmd), &cmd);
270}
271
272#ifdef CONFIG_IWLWIFI_DEBUGFS
273static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
274 struct ieee80211_vif *vif, char *buf,
275 int bufsz)
276{
277 struct iwl_powertable_cmd cmd = {};
278 int pos = 0;
279
280 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
281
282 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
283 (cmd.flags &
284 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
285 0 : 1);
286 pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
287 le32_to_cpu(cmd.skip_dtim_periods));
288 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
289 iwlmvm_mod_params.power_scheme);
290 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
291 le16_to_cpu(cmd.flags));
292 pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
293 cmd.keep_alive_seconds);
294
295 if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
296 pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
297 (cmd.flags &
298 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
299 1 : 0);
300 pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
301 le32_to_cpu(cmd.rx_data_timeout));
302 pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
303 le32_to_cpu(cmd.tx_data_timeout));
304 if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
305 pos += scnprintf(buf+pos, bufsz-pos,
306 "lprx_rssi_threshold = %d\n",
307 le32_to_cpu(cmd.lprx_rssi_threshold));
308 }
309 return pos;
310}
311#endif
312
313const struct iwl_mvm_power_ops pm_legacy_ops = {
314 .power_update_mode = iwl_mvm_power_legacy_update_mode,
315 .power_disable = iwl_mvm_power_legacy_disable,
316#ifdef CONFIG_IWLWIFI_DEBUGFS
317 .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
318#endif
319};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index ce5db6c4ef7e..35e86e06dffd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -65,9 +65,14 @@
65#include "fw-api.h" 65#include "fw-api.h"
66#include "mvm.h" 66#include "mvm.h"
67 67
68#define QUOTA_100 IWL_MVM_MAX_QUOTA
69#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
70
68struct iwl_mvm_quota_iterator_data { 71struct iwl_mvm_quota_iterator_data {
69 int n_interfaces[MAX_BINDINGS]; 72 int n_interfaces[MAX_BINDINGS];
70 int colors[MAX_BINDINGS]; 73 int colors[MAX_BINDINGS];
74 int low_latency[MAX_BINDINGS];
75 int n_low_latency_bindings;
71 struct ieee80211_vif *new_vif; 76 struct ieee80211_vif *new_vif;
72}; 77};
73 78
@@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
107 switch (vif->type) { 112 switch (vif->type) {
108 case NL80211_IFTYPE_STATION: 113 case NL80211_IFTYPE_STATION:
109 if (vif->bss_conf.assoc) 114 if (vif->bss_conf.assoc)
110 data->n_interfaces[id]++; 115 break;
111 break; 116 return;
112 case NL80211_IFTYPE_AP: 117 case NL80211_IFTYPE_AP:
113 case NL80211_IFTYPE_ADHOC: 118 case NL80211_IFTYPE_ADHOC:
114 if (mvmvif->ap_ibss_active) 119 if (mvmvif->ap_ibss_active)
115 data->n_interfaces[id]++; 120 break;
116 break; 121 return;
117 case NL80211_IFTYPE_MONITOR: 122 case NL80211_IFTYPE_MONITOR:
118 if (mvmvif->monitor_active) 123 if (mvmvif->monitor_active)
119 data->n_interfaces[id]++; 124 break;
120 break; 125 return;
121 case NL80211_IFTYPE_P2P_DEVICE: 126 case NL80211_IFTYPE_P2P_DEVICE:
122 break; 127 return;
123 default: 128 default:
124 WARN_ON_ONCE(1); 129 WARN_ON_ONCE(1);
125 break; 130 return;
131 }
132
133 data->n_interfaces[id]++;
134
135 if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
136 data->n_low_latency_bindings++;
137 data->low_latency[id] = true;
126 } 138 }
127} 139}
128 140
@@ -162,7 +174,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
162int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) 174int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
163{ 175{
164 struct iwl_time_quota_cmd cmd = {}; 176 struct iwl_time_quota_cmd cmd = {};
165 int i, idx, ret, num_active_macs, quota, quota_rem; 177 int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
166 struct iwl_mvm_quota_iterator_data data = { 178 struct iwl_mvm_quota_iterator_data data = {
167 .n_interfaces = {}, 179 .n_interfaces = {},
168 .colors = { -1, -1, -1, -1 }, 180 .colors = { -1, -1, -1, -1 },
@@ -197,11 +209,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
197 num_active_macs += data.n_interfaces[i]; 209 num_active_macs += data.n_interfaces[i];
198 } 210 }
199 211
200 quota = 0; 212 n_non_lowlat = num_active_macs;
201 quota_rem = 0; 213
202 if (num_active_macs) { 214 if (data.n_low_latency_bindings == 1) {
203 quota = IWL_MVM_MAX_QUOTA / num_active_macs; 215 for (i = 0; i < MAX_BINDINGS; i++) {
204 quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs; 216 if (data.low_latency[i]) {
217 n_non_lowlat -= data.n_interfaces[i];
218 break;
219 }
220 }
221 }
222
223 if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
224 /*
225 * Reserve quota for the low latency binding in case that
226 * there are several data bindings but only a single
227 * low latency one. Split the rest of the quota equally
228 * between the other data interfaces.
229 */
230 quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
231 quota_rem = QUOTA_100 - n_non_lowlat * quota -
232 QUOTA_LOWLAT_MIN;
233 } else if (num_active_macs) {
234 /*
235 * There are 0 or more than 1 low latency bindings, or all the
236 * data interfaces belong to the single low latency binding.
237 * Split the quota equally between the data interfaces.
238 */
239 quota = QUOTA_100 / num_active_macs;
240 quota_rem = QUOTA_100 % num_active_macs;
241 } else {
242 /* values don't really matter - won't be used */
243 quota = 0;
244 quota_rem = 0;
205 } 245 }
206 246
207 for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { 247 for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -211,19 +251,37 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
211 cmd.quotas[idx].id_and_color = 251 cmd.quotas[idx].id_and_color =
212 cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); 252 cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
213 253
214 if (data.n_interfaces[i] <= 0) { 254 if (data.n_interfaces[i] <= 0)
215 cmd.quotas[idx].quota = cpu_to_le32(0); 255 cmd.quotas[idx].quota = cpu_to_le32(0);
216 cmd.quotas[idx].max_duration = cpu_to_le32(0); 256 else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
217 } else { 257 data.low_latency[i])
258 /*
259 * There is more than one binding, but only one of the
260 * bindings is in low latency. For this case, allocate
261 * the minimal required quota for the low latency
262 * binding.
263 */
264 cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
265 else
218 cmd.quotas[idx].quota = 266 cmd.quotas[idx].quota =
219 cpu_to_le32(quota * data.n_interfaces[i]); 267 cpu_to_le32(quota * data.n_interfaces[i]);
220 cmd.quotas[idx].max_duration = cpu_to_le32(0); 268
221 } 269 WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
270 "Binding=%d, quota=%u > max=%u\n",
271 idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
272
273 cmd.quotas[idx].max_duration = cpu_to_le32(0);
274
222 idx++; 275 idx++;
223 } 276 }
224 277
225 /* Give the remainder of the session to the first binding */ 278 /* Give the remainder of the session to the first data binding */
226 le32_add_cpu(&cmd.quotas[0].quota, quota_rem); 279 for (i = 0; i < MAX_BINDINGS; i++) {
280 if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
281 le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
282 break;
283 }
284 }
227 285
228 iwl_mvm_adjust_quota_for_noa(mvm, &cmd); 286 iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
229 287
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 6abf74e1351f..568abd61b14f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -166,7 +166,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
166 if (sta->smps_mode == IEEE80211_SMPS_STATIC) 166 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
167 return false; 167 return false;
168 168
169 if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2) 169 if (num_of_ant(mvm->fw->valid_tx_ant) < 2)
170 return false; 170 return false;
171 171
172 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) 172 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -211,9 +211,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
211 .next_columns = { 211 .next_columns = {
212 RS_COLUMN_LEGACY_ANT_B, 212 RS_COLUMN_LEGACY_ANT_B,
213 RS_COLUMN_SISO_ANT_A, 213 RS_COLUMN_SISO_ANT_A,
214 RS_COLUMN_SISO_ANT_B,
214 RS_COLUMN_MIMO2, 215 RS_COLUMN_MIMO2,
215 RS_COLUMN_INVALID, 216 RS_COLUMN_MIMO2_SGI,
216 RS_COLUMN_INVALID,
217 }, 217 },
218 }, 218 },
219 [RS_COLUMN_LEGACY_ANT_B] = { 219 [RS_COLUMN_LEGACY_ANT_B] = {
@@ -221,10 +221,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
221 .ant = ANT_B, 221 .ant = ANT_B,
222 .next_columns = { 222 .next_columns = {
223 RS_COLUMN_LEGACY_ANT_A, 223 RS_COLUMN_LEGACY_ANT_A,
224 RS_COLUMN_SISO_ANT_A,
224 RS_COLUMN_SISO_ANT_B, 225 RS_COLUMN_SISO_ANT_B,
225 RS_COLUMN_MIMO2, 226 RS_COLUMN_MIMO2,
226 RS_COLUMN_INVALID, 227 RS_COLUMN_MIMO2_SGI,
227 RS_COLUMN_INVALID,
228 }, 228 },
229 }, 229 },
230 [RS_COLUMN_SISO_ANT_A] = { 230 [RS_COLUMN_SISO_ANT_A] = {
@@ -234,8 +234,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
234 RS_COLUMN_SISO_ANT_B, 234 RS_COLUMN_SISO_ANT_B,
235 RS_COLUMN_MIMO2, 235 RS_COLUMN_MIMO2,
236 RS_COLUMN_SISO_ANT_A_SGI, 236 RS_COLUMN_SISO_ANT_A_SGI,
237 RS_COLUMN_INVALID, 237 RS_COLUMN_SISO_ANT_B_SGI,
238 RS_COLUMN_INVALID, 238 RS_COLUMN_MIMO2_SGI,
239 }, 239 },
240 .checks = { 240 .checks = {
241 rs_siso_allow, 241 rs_siso_allow,
@@ -248,8 +248,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
248 RS_COLUMN_SISO_ANT_A, 248 RS_COLUMN_SISO_ANT_A,
249 RS_COLUMN_MIMO2, 249 RS_COLUMN_MIMO2,
250 RS_COLUMN_SISO_ANT_B_SGI, 250 RS_COLUMN_SISO_ANT_B_SGI,
251 RS_COLUMN_INVALID, 251 RS_COLUMN_SISO_ANT_A_SGI,
252 RS_COLUMN_INVALID, 252 RS_COLUMN_MIMO2_SGI,
253 }, 253 },
254 .checks = { 254 .checks = {
255 rs_siso_allow, 255 rs_siso_allow,
@@ -263,8 +263,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
263 RS_COLUMN_SISO_ANT_B_SGI, 263 RS_COLUMN_SISO_ANT_B_SGI,
264 RS_COLUMN_MIMO2_SGI, 264 RS_COLUMN_MIMO2_SGI,
265 RS_COLUMN_SISO_ANT_A, 265 RS_COLUMN_SISO_ANT_A,
266 RS_COLUMN_INVALID, 266 RS_COLUMN_SISO_ANT_B,
267 RS_COLUMN_INVALID, 267 RS_COLUMN_MIMO2,
268 }, 268 },
269 .checks = { 269 .checks = {
270 rs_siso_allow, 270 rs_siso_allow,
@@ -279,8 +279,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
279 RS_COLUMN_SISO_ANT_A_SGI, 279 RS_COLUMN_SISO_ANT_A_SGI,
280 RS_COLUMN_MIMO2_SGI, 280 RS_COLUMN_MIMO2_SGI,
281 RS_COLUMN_SISO_ANT_B, 281 RS_COLUMN_SISO_ANT_B,
282 RS_COLUMN_INVALID, 282 RS_COLUMN_SISO_ANT_A,
283 RS_COLUMN_INVALID, 283 RS_COLUMN_MIMO2,
284 }, 284 },
285 .checks = { 285 .checks = {
286 rs_siso_allow, 286 rs_siso_allow,
@@ -292,10 +292,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
292 .ant = ANT_AB, 292 .ant = ANT_AB,
293 .next_columns = { 293 .next_columns = {
294 RS_COLUMN_SISO_ANT_A, 294 RS_COLUMN_SISO_ANT_A,
295 RS_COLUMN_SISO_ANT_B,
296 RS_COLUMN_SISO_ANT_A_SGI,
297 RS_COLUMN_SISO_ANT_B_SGI,
295 RS_COLUMN_MIMO2_SGI, 298 RS_COLUMN_MIMO2_SGI,
296 RS_COLUMN_INVALID,
297 RS_COLUMN_INVALID,
298 RS_COLUMN_INVALID,
299 }, 299 },
300 .checks = { 300 .checks = {
301 rs_mimo_allow, 301 rs_mimo_allow,
@@ -307,10 +307,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
307 .sgi = true, 307 .sgi = true,
308 .next_columns = { 308 .next_columns = {
309 RS_COLUMN_SISO_ANT_A_SGI, 309 RS_COLUMN_SISO_ANT_A_SGI,
310 RS_COLUMN_SISO_ANT_B_SGI,
311 RS_COLUMN_SISO_ANT_A,
312 RS_COLUMN_SISO_ANT_B,
310 RS_COLUMN_MIMO2, 313 RS_COLUMN_MIMO2,
311 RS_COLUMN_INVALID,
312 RS_COLUMN_INVALID,
313 RS_COLUMN_INVALID,
314 }, 314 },
315 .checks = { 315 .checks = {
316 rs_mimo_allow, 316 rs_mimo_allow,
@@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
380 * (2.4 GHz) band. 380 * (2.4 GHz) band.
381 */ 381 */
382 382
383static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { 383static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
384 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0 384 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
385}; 385};
386 386
387/* Expected TpT tables. 4 indexes: 387/* Expected TpT tables. 4 indexes:
388 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI 388 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
389 */ 389 */
390static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = { 390static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
391 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0}, 391 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
392 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0}, 392 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
393 {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0}, 393 {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
394 {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0}, 394 {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
395}; 395};
396 396
397static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = { 397static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
398 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275}, 398 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
399 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280}, 399 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
400 {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173}, 400 {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
401 {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284}, 401 {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
402}; 402};
403 403
404static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = { 404static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
405 {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308}, 405 {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
406 {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312}, 406 {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
407 {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466}, 407 {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
408 {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691}, 408 {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
409}; 409};
410 410
411static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { 411static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
412 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0}, 412 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
413 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0}, 413 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
414 {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0}, 414 {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
415 {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0}, 415 {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
416}; 416};
417 417
418static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { 418static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
419 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300}, 419 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
420 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303}, 420 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
421 {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053}, 421 {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
422 {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221}, 422 {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
423}; 423};
424 424
425static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = { 425static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
426 {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319}, 426 {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
427 {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320}, 427 {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
428 {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219}, 428 {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@@ -503,6 +503,14 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
503 window->average_tpt = IWL_INVALID_VALUE; 503 window->average_tpt = IWL_INVALID_VALUE;
504} 504}
505 505
506static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
507{
508 int i;
509
510 for (i = 0; i < IWL_RATE_COUNT; i++)
511 rs_rate_scale_clear_window(&tbl->win[i]);
512}
513
506static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) 514static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
507{ 515{
508 return (ant_type & valid_antenna) == ant_type; 516 return (ant_type & valid_antenna) == ant_type;
@@ -566,19 +574,13 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
566 * at this rate. window->data contains the bitmask of successful 574 * at this rate. window->data contains the bitmask of successful
567 * packets. 575 * packets.
568 */ 576 */
569static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, 577static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
570 int scale_index, int attempts, int successes) 578 int scale_index, int attempts, int successes,
579 struct iwl_rate_scale_data *window)
571{ 580{
572 struct iwl_rate_scale_data *window = NULL;
573 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); 581 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
574 s32 fail_count, tpt; 582 s32 fail_count, tpt;
575 583
576 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
577 return -EINVAL;
578
579 /* Select window for current tx bit rate */
580 window = &(tbl->win[scale_index]);
581
582 /* Get expected throughput */ 584 /* Get expected throughput */
583 tpt = get_expected_tpt(tbl, scale_index); 585 tpt = get_expected_tpt(tbl, scale_index);
584 586
@@ -636,6 +638,21 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
636 return 0; 638 return 0;
637} 639}
638 640
641static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
642 int scale_index, int attempts, int successes)
643{
644 struct iwl_rate_scale_data *window = NULL;
645
646 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
647 return -EINVAL;
648
649 /* Select window for current tx bit rate */
650 window = &(tbl->win[scale_index]);
651
652 return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
653 window);
654}
655
639/* Convert rs_rate object into ucode rate bitmask */ 656/* Convert rs_rate object into ucode rate bitmask */
640static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, 657static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
641 struct rs_rate *rate) 658 struct rs_rate *rate)
@@ -905,7 +922,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
905 922
906 rate->bw = RATE_MCS_CHAN_WIDTH_20; 923 rate->bw = RATE_MCS_CHAN_WIDTH_20;
907 924
908 WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX && 925 WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
909 rate->index > IWL_RATE_MCS_9_INDEX); 926 rate->index > IWL_RATE_MCS_9_INDEX);
910 927
911 rate->index = rs_ht_to_legacy[rate->index]; 928 rate->index = rs_ht_to_legacy[rate->index];
@@ -917,7 +934,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
917 934
918 935
919 if (num_of_ant(rate->ant) > 1) 936 if (num_of_ant(rate->ant) > 1)
920 rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw)); 937 rate->ant = first_antenna(mvm->fw->valid_tx_ant);
921 938
922 /* Relevant in both switching to SISO or Legacy */ 939 /* Relevant in both switching to SISO or Legacy */
923 rate->sgi = false; 940 rate->sgi = false;
@@ -1169,12 +1186,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1169 lq_sta->visited_columns = 0; 1186 lq_sta->visited_columns = 0;
1170} 1187}
1171 1188
1172static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, 1189static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1173 const struct rs_tx_column *column, 1190 const struct rs_tx_column *column,
1174 u32 bw) 1191 u32 bw)
1175{ 1192{
1176 /* Used to choose among HT tables */ 1193 /* Used to choose among HT tables */
1177 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; 1194 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1178 1195
1179 if (WARN_ON_ONCE(column->mode != RS_LEGACY && 1196 if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
1180 column->mode != RS_SISO && 1197 column->mode != RS_SISO &&
@@ -1262,9 +1279,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1262 &(lq_sta->lq_info[lq_sta->active_tbl]); 1279 &(lq_sta->lq_info[lq_sta->active_tbl]);
1263 s32 active_sr = active_tbl->win[index].success_ratio; 1280 s32 active_sr = active_tbl->win[index].success_ratio;
1264 s32 active_tpt = active_tbl->expected_tpt[index]; 1281 s32 active_tpt = active_tbl->expected_tpt[index];
1265
1266 /* expected "search" throughput */ 1282 /* expected "search" throughput */
1267 s32 *tpt_tbl = tbl->expected_tpt; 1283 const u16 *tpt_tbl = tbl->expected_tpt;
1268 1284
1269 s32 new_rate, high, low, start_hi; 1285 s32 new_rate, high, low, start_hi;
1270 u16 high_low; 1286 u16 high_low;
@@ -1362,7 +1378,6 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
1362static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) 1378static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1363{ 1379{
1364 struct iwl_scale_tbl_info *tbl; 1380 struct iwl_scale_tbl_info *tbl;
1365 int i;
1366 int active_tbl; 1381 int active_tbl;
1367 int flush_interval_passed = 0; 1382 int flush_interval_passed = 0;
1368 struct iwl_mvm *mvm; 1383 struct iwl_mvm *mvm;
@@ -1423,9 +1438,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1423 1438
1424 IWL_DEBUG_RATE(mvm, 1439 IWL_DEBUG_RATE(mvm,
1425 "LQ: stay in table clear win\n"); 1440 "LQ: stay in table clear win\n");
1426 for (i = 0; i < IWL_RATE_COUNT; i++) 1441 rs_rate_scale_clear_tbl_windows(tbl);
1427 rs_rate_scale_clear_window(
1428 &(tbl->win[i]));
1429 } 1442 }
1430 } 1443 }
1431 1444
@@ -1434,8 +1447,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1434 * "search" table). */ 1447 * "search" table). */
1435 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { 1448 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
1436 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); 1449 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
1437 for (i = 0; i < IWL_RATE_COUNT; i++) 1450 rs_rate_scale_clear_tbl_windows(tbl);
1438 rs_rate_scale_clear_window(&(tbl->win[i]));
1439 } 1451 }
1440 } 1452 }
1441} 1453}
@@ -1478,8 +1490,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1478 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; 1490 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
1479 const struct rs_tx_column *next_col; 1491 const struct rs_tx_column *next_col;
1480 allow_column_func_t allow_func; 1492 allow_column_func_t allow_func;
1481 u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw); 1493 u8 valid_ants = mvm->fw->valid_tx_ant;
1482 s32 *expected_tpt_tbl; 1494 const u16 *expected_tpt_tbl;
1483 s32 tpt, max_expected_tpt; 1495 s32 tpt, max_expected_tpt;
1484 1496
1485 for (i = 0; i < MAX_NEXT_COLUMNS; i++) { 1497 for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@@ -1725,7 +1737,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1725 int low = IWL_RATE_INVALID; 1737 int low = IWL_RATE_INVALID;
1726 int high = IWL_RATE_INVALID; 1738 int high = IWL_RATE_INVALID;
1727 int index; 1739 int index;
1728 int i;
1729 struct iwl_rate_scale_data *window = NULL; 1740 struct iwl_rate_scale_data *window = NULL;
1730 int current_tpt = IWL_INVALID_VALUE; 1741 int current_tpt = IWL_INVALID_VALUE;
1731 int low_tpt = IWL_INVALID_VALUE; 1742 int low_tpt = IWL_INVALID_VALUE;
@@ -2010,8 +2021,7 @@ lq_update:
2010 if (lq_sta->search_better_tbl) { 2021 if (lq_sta->search_better_tbl) {
2011 /* Access the "search" table, clear its history. */ 2022 /* Access the "search" table, clear its history. */
2012 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 2023 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2013 for (i = 0; i < IWL_RATE_COUNT; i++) 2024 rs_rate_scale_clear_tbl_windows(tbl);
2014 rs_rate_scale_clear_window(&(tbl->win[i]));
2015 2025
2016 /* Use new "search" start rate */ 2026 /* Use new "search" start rate */
2017 index = tbl->rate.index; 2027 index = tbl->rate.index;
@@ -2090,7 +2100,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2090 2100
2091 i = lq_sta->last_txrate_idx; 2101 i = lq_sta->last_txrate_idx;
2092 2102
2093 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw); 2103 valid_tx_ant = mvm->fw->valid_tx_ant;
2094 2104
2095 if (!lq_sta->search_better_tbl) 2105 if (!lq_sta->search_better_tbl)
2096 active_tbl = lq_sta->active_tbl; 2106 active_tbl = lq_sta->active_tbl;
@@ -2241,6 +2251,73 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2241 } 2251 }
2242} 2252}
2243 2253
2254#ifdef CONFIG_IWLWIFI_DEBUGFS
2255static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm,
2256 struct iwl_mvm_frame_stats *stats)
2257{
2258 spin_lock_bh(&mvm->drv_stats_lock);
2259 memset(stats, 0, sizeof(*stats));
2260 spin_unlock_bh(&mvm->drv_stats_lock);
2261}
2262
2263void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
2264 struct iwl_mvm_frame_stats *stats,
2265 u32 rate, bool agg)
2266{
2267 u8 nss = 0, mcs = 0;
2268
2269 spin_lock(&mvm->drv_stats_lock);
2270
2271 if (agg)
2272 stats->agg_frames++;
2273
2274 stats->success_frames++;
2275
2276 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
2277 case RATE_MCS_CHAN_WIDTH_20:
2278 stats->bw_20_frames++;
2279 break;
2280 case RATE_MCS_CHAN_WIDTH_40:
2281 stats->bw_40_frames++;
2282 break;
2283 case RATE_MCS_CHAN_WIDTH_80:
2284 stats->bw_80_frames++;
2285 break;
2286 default:
2287 WARN_ONCE(1, "bad BW. rate 0x%x", rate);
2288 }
2289
2290 if (rate & RATE_MCS_HT_MSK) {
2291 stats->ht_frames++;
2292 mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
2293 nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
2294 } else if (rate & RATE_MCS_VHT_MSK) {
2295 stats->vht_frames++;
2296 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
2297 nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
2298 RATE_VHT_MCS_NSS_POS) + 1;
2299 } else {
2300 stats->legacy_frames++;
2301 }
2302
2303 if (nss == 1)
2304 stats->siso_frames++;
2305 else if (nss == 2)
2306 stats->mimo2_frames++;
2307
2308 if (rate & RATE_MCS_SGI_MSK)
2309 stats->sgi_frames++;
2310 else
2311 stats->ngi_frames++;
2312
2313 stats->last_rates[stats->last_frame_idx] = rate;
2314 stats->last_frame_idx = (stats->last_frame_idx + 1) %
2315 ARRAY_SIZE(stats->last_rates);
2316
2317 spin_unlock(&mvm->drv_stats_lock);
2318}
2319#endif
2320
2244/* 2321/*
2245 * Called after adding a new station to initialize rate scaling 2322 * Called after adding a new station to initialize rate scaling
2246 */ 2323 */
@@ -2265,8 +2342,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2265 lq_sta->lq.sta_id = sta_priv->sta_id; 2342 lq_sta->lq.sta_id = sta_priv->sta_id;
2266 2343
2267 for (j = 0; j < LQ_SIZE; j++) 2344 for (j = 0; j < LQ_SIZE; j++)
2268 for (i = 0; i < IWL_RATE_COUNT; i++) 2345 rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
2269 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2270 2346
2271 lq_sta->flush_timer = 0; 2347 lq_sta->flush_timer = 0;
2272 2348
@@ -2320,7 +2396,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2320 2396
2321 /* These values will be overridden later */ 2397 /* These values will be overridden later */
2322 lq_sta->lq.single_stream_ant_msk = 2398 lq_sta->lq.single_stream_ant_msk =
2323 first_antenna(iwl_fw_valid_tx_ant(mvm->fw)); 2399 first_antenna(mvm->fw->valid_tx_ant);
2324 lq_sta->lq.dual_stream_ant_msk = ANT_AB; 2400 lq_sta->lq.dual_stream_ant_msk = ANT_AB;
2325 2401
2326 /* as default allow aggregation for all tids */ 2402 /* as default allow aggregation for all tids */
@@ -2335,7 +2411,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2335#ifdef CONFIG_MAC80211_DEBUGFS 2411#ifdef CONFIG_MAC80211_DEBUGFS
2336 lq_sta->dbg_fixed_rate = 0; 2412 lq_sta->dbg_fixed_rate = 0;
2337#endif 2413#endif
2338 2414#ifdef CONFIG_IWLWIFI_DEBUGFS
2415 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
2416#endif
2339 rs_initialize_lq(mvm, sta, lq_sta, band, init); 2417 rs_initialize_lq(mvm, sta, lq_sta, band, init);
2340} 2418}
2341 2419
@@ -2446,7 +2524,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2446 2524
2447 memcpy(&rate, initial_rate, sizeof(rate)); 2525 memcpy(&rate, initial_rate, sizeof(rate));
2448 2526
2449 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw); 2527 valid_tx_ant = mvm->fw->valid_tx_ant;
2450 2528
2451 if (is_siso(&rate)) { 2529 if (is_siso(&rate)) {
2452 num_rates = RS_INITIAL_SISO_NUM_RATES; 2530 num_rates = RS_INITIAL_SISO_NUM_RATES;
@@ -2523,7 +2601,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2523 2601
2524 if (sta) 2602 if (sta)
2525 lq_cmd->agg_time_limit = 2603 lq_cmd->agg_time_limit =
2526 cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta)); 2604 cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
2527} 2605}
2528 2606
2529static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 2607static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2547,7 +2625,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
2547} 2625}
2548 2626
2549#ifdef CONFIG_MAC80211_DEBUGFS 2627#ifdef CONFIG_MAC80211_DEBUGFS
2550static int rs_pretty_print_rate(char *buf, const u32 rate) 2628int rs_pretty_print_rate(char *buf, const u32 rate)
2551{ 2629{
2552 2630
2553 char *type, *bw; 2631 char *type, *bw;
@@ -2596,7 +2674,7 @@ static int rs_pretty_print_rate(char *buf, const u32 rate)
2596 return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n", 2674 return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
2597 type, rs_pretty_ant(ant), bw, mcs, nss, 2675 type, rs_pretty_ant(ant), bw, mcs, nss,
2598 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", 2676 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
2599 (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", 2677 (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
2600 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", 2678 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
2601 (rate & RATE_MCS_BF_MSK) ? "BF " : "", 2679 (rate & RATE_MCS_BF_MSK) ? "BF " : "",
2602 (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : ""); 2680 (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
@@ -2677,9 +2755,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2677 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2755 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2678 lq_sta->dbg_fixed_rate); 2756 lq_sta->dbg_fixed_rate);
2679 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 2757 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2680 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_A) ? "ANT_A," : "", 2758 (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
2681 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "", 2759 (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
2682 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : ""); 2760 (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : "");
2683 desc += sprintf(buff+desc, "lq type %s\n", 2761 desc += sprintf(buff+desc, "lq type %s\n",
2684 (is_legacy(rate)) ? "legacy" : 2762 (is_legacy(rate)) ? "legacy" :
2685 is_vht(rate) ? "VHT" : "HT"); 2763 is_vht(rate) ? "VHT" : "HT");
@@ -2815,8 +2893,8 @@ static void rs_rate_init_stub(void *mvm_r,
2815 struct ieee80211_sta *sta, void *mvm_sta) 2893 struct ieee80211_sta *sta, void *mvm_sta)
2816{ 2894{
2817} 2895}
2818static struct rate_control_ops rs_mvm_ops = { 2896
2819 .module = NULL, 2897static const struct rate_control_ops rs_mvm_ops = {
2820 .name = RS_NAME, 2898 .name = RS_NAME,
2821 .tx_status = rs_tx_status, 2899 .tx_status = rs_tx_status,
2822 .get_rate = rs_get_rate, 2900 .get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 7bc6404f6986..3332b396011e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -277,7 +277,7 @@ enum rs_column {
277struct iwl_scale_tbl_info { 277struct iwl_scale_tbl_info {
278 struct rs_rate rate; 278 struct rs_rate rate;
279 enum rs_column column; 279 enum rs_column column;
280 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 280 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
281 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 281 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
282}; 282};
283 283
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index a85b60f7e67e..6061553a5e44 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -77,6 +77,15 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
77 77
78 memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); 78 memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
79 mvm->ampdu_ref++; 79 mvm->ampdu_ref++;
80
81#ifdef CONFIG_IWLWIFI_DEBUGFS
82 if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
83 spin_lock(&mvm->drv_stats_lock);
84 mvm->drv_rx_stats.ampdu_count++;
85 spin_unlock(&mvm->drv_stats_lock);
86 }
87#endif
88
80 return 0; 89 return 0;
81} 90}
82 91
@@ -129,22 +138,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
129 struct ieee80211_rx_status *rx_status) 138 struct ieee80211_rx_status *rx_status)
130{ 139{
131 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm; 140 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
132 int rssi_all_band_a, rssi_all_band_b; 141 u32 agc_a, agc_b;
133 u32 agc_a, agc_b, max_agc;
134 u32 val; 142 u32 val;
135 143
136 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); 144 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
137 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; 145 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
138 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; 146 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
139 max_agc = max_t(u32, agc_a, agc_b);
140 147
141 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); 148 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
142 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; 149 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
143 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; 150 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
144 rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
145 IWL_OFDM_RSSI_ALLBAND_A_POS;
146 rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
147 IWL_OFDM_RSSI_ALLBAND_B_POS;
148 151
149 /* 152 /*
150 * dBm = rssi dB - agc dB - constant. 153 * dBm = rssi dB - agc dB - constant.
@@ -364,31 +367,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
364 rx_status.flag |= RX_FLAG_40MHZ; 367 rx_status.flag |= RX_FLAG_40MHZ;
365 break; 368 break;
366 case RATE_MCS_CHAN_WIDTH_80: 369 case RATE_MCS_CHAN_WIDTH_80:
367 rx_status.flag |= RX_FLAG_80MHZ; 370 rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
368 break; 371 break;
369 case RATE_MCS_CHAN_WIDTH_160: 372 case RATE_MCS_CHAN_WIDTH_160:
370 rx_status.flag |= RX_FLAG_160MHZ; 373 rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
371 break; 374 break;
372 } 375 }
373 if (rate_n_flags & RATE_MCS_SGI_MSK) 376 if (rate_n_flags & RATE_MCS_SGI_MSK)
374 rx_status.flag |= RX_FLAG_SHORT_GI; 377 rx_status.flag |= RX_FLAG_SHORT_GI;
375 if (rate_n_flags & RATE_HT_MCS_GF_MSK) 378 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
376 rx_status.flag |= RX_FLAG_HT_GF; 379 rx_status.flag |= RX_FLAG_HT_GF;
380 if (rate_n_flags & RATE_MCS_LDPC_MSK)
381 rx_status.flag |= RX_FLAG_LDPC;
377 if (rate_n_flags & RATE_MCS_HT_MSK) { 382 if (rate_n_flags & RATE_MCS_HT_MSK) {
383 u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
384 RATE_MCS_STBC_POS;
378 rx_status.flag |= RX_FLAG_HT; 385 rx_status.flag |= RX_FLAG_HT;
379 rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; 386 rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
387 rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
380 } else if (rate_n_flags & RATE_MCS_VHT_MSK) { 388 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
389 u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
390 RATE_MCS_STBC_POS;
381 rx_status.vht_nss = 391 rx_status.vht_nss =
382 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> 392 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
383 RATE_VHT_MCS_NSS_POS) + 1; 393 RATE_VHT_MCS_NSS_POS) + 1;
384 rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; 394 rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
385 rx_status.flag |= RX_FLAG_VHT; 395 rx_status.flag |= RX_FLAG_VHT;
396 rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
386 } else { 397 } else {
387 rx_status.rate_idx = 398 rx_status.rate_idx =
388 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, 399 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
389 rx_status.band); 400 rx_status.band);
390 } 401 }
391 402
403#ifdef CONFIG_IWLWIFI_DEBUGFS
404 iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
405 rx_status.flag & RX_FLAG_AMPDU_DETAILS);
406#endif
392 iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status, 407 iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
393 rxb, &rx_status); 408 rxb, &rx_status);
394 return 0; 409 return 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 742afc429c94..c91dc8498852 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -70,9 +70,16 @@
70 70
71#define IWL_PLCP_QUIET_THRESH 1 71#define IWL_PLCP_QUIET_THRESH 1
72#define IWL_ACTIVE_QUIET_TIME 10 72#define IWL_ACTIVE_QUIET_TIME 10
73#define LONG_OUT_TIME_PERIOD 600 73
74#define SHORT_OUT_TIME_PERIOD 200 74struct iwl_mvm_scan_params {
75#define SUSPEND_TIME_PERIOD 100 75 u32 max_out_time;
76 u32 suspend_time;
77 bool passive_fragmented;
78 struct _dwell {
79 u16 passive;
80 u16 active;
81 } dwell[IEEE80211_NUM_BANDS];
82};
76 83
77static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) 84static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
78{ 85{
@@ -82,7 +89,7 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
82 if (mvm->scan_rx_ant != ANT_NONE) 89 if (mvm->scan_rx_ant != ANT_NONE)
83 rx_ant = mvm->scan_rx_ant; 90 rx_ant = mvm->scan_rx_ant;
84 else 91 else
85 rx_ant = iwl_fw_valid_rx_ant(mvm->fw); 92 rx_ant = mvm->fw->valid_rx_ant;
86 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; 93 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
87 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; 94 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
88 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; 95 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -90,24 +97,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
90 return cpu_to_le16(rx_chain); 97 return cpu_to_le16(rx_chain);
91} 98}
92 99
93static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
94 u32 flags, bool is_assoc)
95{
96 if (!is_assoc)
97 return 0;
98 if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
99 return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
100 return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
101}
102
103static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
104 bool is_assoc)
105{
106 if (!is_assoc)
107 return 0;
108 return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
109}
110
111static inline __le32 100static inline __le32
112iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req) 101iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
113{ 102{
@@ -124,7 +113,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
124 u32 tx_ant; 113 u32 tx_ant;
125 114
126 mvm->scan_last_antenna_idx = 115 mvm->scan_last_antenna_idx =
127 iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw), 116 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
128 mvm->scan_last_antenna_idx); 117 mvm->scan_last_antenna_idx);
129 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; 118 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
130 119
@@ -181,15 +170,14 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
181 170
182static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd, 171static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
183 struct cfg80211_scan_request *req, 172 struct cfg80211_scan_request *req,
184 bool basic_ssid) 173 bool basic_ssid,
174 struct iwl_mvm_scan_params *params)
185{ 175{
186 u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
187 u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
188 req->n_ssids);
189 struct iwl_scan_channel *chan = (struct iwl_scan_channel *) 176 struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
190 (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); 177 (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
191 int i; 178 int i;
192 int type = BIT(req->n_ssids) - 1; 179 int type = BIT(req->n_ssids) - 1;
180 enum ieee80211_band band = req->channels[0]->band;
193 181
194 if (!basic_ssid) 182 if (!basic_ssid)
195 type |= BIT(req->n_ssids); 183 type |= BIT(req->n_ssids);
@@ -199,8 +187,8 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
199 chan->type = cpu_to_le32(type); 187 chan->type = cpu_to_le32(type);
200 if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR) 188 if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
201 chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE); 189 chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
202 chan->active_dwell = cpu_to_le16(active_dwell); 190 chan->active_dwell = cpu_to_le16(params->dwell[band].active);
203 chan->passive_dwell = cpu_to_le16(passive_dwell); 191 chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
204 chan->iteration_count = cpu_to_le16(1); 192 chan->iteration_count = cpu_to_le16(1);
205 chan++; 193 chan++;
206 } 194 }
@@ -267,13 +255,76 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
267 return (u16)len; 255 return (u16)len;
268} 256}
269 257
270static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac, 258static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
271 struct ieee80211_vif *vif) 259 struct ieee80211_vif *vif)
272{ 260{
273 bool *is_assoc = data; 261 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
262 bool *global_bound = data;
263
264 if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
265 *global_bound = true;
266}
267
268static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
269 struct ieee80211_vif *vif,
270 int n_ssids,
271 struct iwl_mvm_scan_params *params)
272{
273 bool global_bound = false;
274 enum ieee80211_band band;
275
276 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
277 IEEE80211_IFACE_ITER_NORMAL,
278 iwl_mvm_scan_condition_iterator,
279 &global_bound);
280 /*
281 * Under low latency traffic passive scan is fragmented meaning
282 * that dwell on a particular channel will be fragmented. Each fragment
283 * dwell time is 20ms and fragments period is 105ms. Skipping to next
284 * channel will be delayed by the same period - 105ms. So suspend_time
285 * parameter describing both fragments and channels skipping periods is
286 * set to 105ms. This value is chosen so that overall passive scan
287 * duration will not be too long. Max_out_time in this case is set to
288 * 70ms, so for active scanning operating channel will be left for 70ms
289 * while for passive still for 20ms (fragment dwell).
290 */
291 if (global_bound) {
292 if (!iwl_mvm_low_latency(mvm)) {
293 params->suspend_time = ieee80211_tu_to_usec(100);
294 params->max_out_time = ieee80211_tu_to_usec(600);
295 } else {
296 params->suspend_time = ieee80211_tu_to_usec(105);
297 /* P2P doesn't support fragmented passive scan, so
298 * configure max_out_time to be at least longest dwell
299 * time for passive scan.
300 */
301 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
302 params->max_out_time = ieee80211_tu_to_usec(70);
303 params->passive_fragmented = true;
304 } else {
305 u32 passive_dwell;
274 306
275 if (vif->bss_conf.assoc) 307 /*
276 *is_assoc = true; 308 * Use band G so that passive channel dwell time
309 * will be assigned with maximum value.
310 */
311 band = IEEE80211_BAND_2GHZ;
312 passive_dwell = iwl_mvm_get_passive_dwell(band);
313 params->max_out_time =
314 ieee80211_tu_to_usec(passive_dwell);
315 }
316 }
317 }
318
319 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
320 if (params->passive_fragmented)
321 params->dwell[band].passive = 20;
322 else
323 params->dwell[band].passive =
324 iwl_mvm_get_passive_dwell(band);
325 params->dwell[band].active = iwl_mvm_get_active_dwell(band,
326 n_ssids);
327 }
277} 328}
278 329
279int iwl_mvm_scan_request(struct iwl_mvm *mvm, 330int iwl_mvm_scan_request(struct iwl_mvm *mvm,
@@ -288,13 +339,13 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
288 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 339 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
289 }; 340 };
290 struct iwl_scan_cmd *cmd = mvm->scan_cmd; 341 struct iwl_scan_cmd *cmd = mvm->scan_cmd;
291 bool is_assoc = false;
292 int ret; 342 int ret;
293 u32 status; 343 u32 status;
294 int ssid_len = 0; 344 int ssid_len = 0;
295 u8 *ssid = NULL; 345 u8 *ssid = NULL;
296 bool basic_ssid = !(mvm->fw->ucode_capa.flags & 346 bool basic_ssid = !(mvm->fw->ucode_capa.flags &
297 IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID); 347 IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
348 struct iwl_mvm_scan_params params = {};
298 349
299 lockdep_assert_held(&mvm->mutex); 350 lockdep_assert_held(&mvm->mutex);
300 BUG_ON(mvm->scan_cmd == NULL); 351 BUG_ON(mvm->scan_cmd == NULL);
@@ -304,17 +355,18 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
304 memset(cmd, 0, sizeof(struct iwl_scan_cmd) + 355 memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
305 mvm->fw->ucode_capa.max_probe_length + 356 mvm->fw->ucode_capa.max_probe_length +
306 (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel))); 357 (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
307 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 358
308 IEEE80211_IFACE_ITER_NORMAL,
309 iwl_mvm_vif_assoc_iterator,
310 &is_assoc);
311 cmd->channel_count = (u8)req->n_channels; 359 cmd->channel_count = (u8)req->n_channels;
312 cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); 360 cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
313 cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); 361 cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
314 cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm); 362 cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
315 cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags, 363
316 is_assoc); 364 iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
317 cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc); 365 cmd->max_out_time = cpu_to_le32(params.max_out_time);
366 cmd->suspend_time = cpu_to_le32(params.suspend_time);
367 if (params.passive_fragmented)
368 cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
369
318 cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req); 370 cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
319 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | 371 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
320 MAC_FILTER_IN_BEACON); 372 MAC_FILTER_IN_BEACON);
@@ -360,7 +412,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
360 req->ie, req->ie_len, 412 req->ie, req->ie_len,
361 mvm->fw->ucode_capa.max_probe_length)); 413 mvm->fw->ucode_capa.max_probe_length));
362 414
363 iwl_mvm_scan_fill_channels(cmd, req, basic_ssid); 415 iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
364 416
365 cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) + 417 cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
366 le16_to_cpu(cmd->tx_cmd.len) + 418 le16_to_cpu(cmd->tx_cmd.len) +
@@ -402,12 +454,17 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
402 struct iwl_rx_packet *pkt = rxb_addr(rxb); 454 struct iwl_rx_packet *pkt = rxb_addr(rxb);
403 struct iwl_scan_complete_notif *notif = (void *)pkt->data; 455 struct iwl_scan_complete_notif *notif = (void *)pkt->data;
404 456
457 lockdep_assert_held(&mvm->mutex);
458
405 IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n", 459 IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
406 notif->status, notif->scanned_channels); 460 notif->status, notif->scanned_channels);
407 461
408 mvm->scan_status = IWL_MVM_SCAN_NONE; 462 if (mvm->scan_status == IWL_MVM_SCAN_OS)
463 mvm->scan_status = IWL_MVM_SCAN_NONE;
409 ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK); 464 ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
410 465
466 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
467
411 return 0; 468 return 0;
412} 469}
413 470
@@ -464,7 +521,7 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
464 }; 521 };
465} 522}
466 523
467void iwl_mvm_cancel_scan(struct iwl_mvm *mvm) 524int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
468{ 525{
469 struct iwl_notification_wait wait_scan_abort; 526 struct iwl_notification_wait wait_scan_abort;
470 static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD, 527 static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
@@ -472,12 +529,13 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
472 int ret; 529 int ret;
473 530
474 if (mvm->scan_status == IWL_MVM_SCAN_NONE) 531 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
475 return; 532 return 0;
476 533
477 if (iwl_mvm_is_radio_killed(mvm)) { 534 if (iwl_mvm_is_radio_killed(mvm)) {
478 ieee80211_scan_completed(mvm->hw, true); 535 ieee80211_scan_completed(mvm->hw, true);
536 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
479 mvm->scan_status = IWL_MVM_SCAN_NONE; 537 mvm->scan_status = IWL_MVM_SCAN_NONE;
480 return; 538 return 0;
481 } 539 }
482 540
483 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, 541 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
@@ -488,18 +546,15 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
488 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 546 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
489 if (ret) { 547 if (ret) {
490 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 548 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
491 /* mac80211's state will be cleaned in the fw_restart flow */ 549 /* mac80211's state will be cleaned in the nic_restart flow */
492 goto out_remove_notif; 550 goto out_remove_notif;
493 } 551 }
494 552
495 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ); 553 return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
496 if (ret)
497 IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
498
499 return;
500 554
501out_remove_notif: 555out_remove_notif:
502 iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort); 556 iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
557 return ret;
503} 558}
504 559
505int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, 560int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -509,12 +564,18 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
509 struct iwl_rx_packet *pkt = rxb_addr(rxb); 564 struct iwl_rx_packet *pkt = rxb_addr(rxb);
510 struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data; 565 struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
511 566
567 /* scan status must be locked for proper checking */
568 lockdep_assert_held(&mvm->mutex);
569
512 IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n", 570 IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
513 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? 571 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
514 "completed" : "aborted"); 572 "completed" : "aborted");
515 573
516 mvm->scan_status = IWL_MVM_SCAN_NONE; 574 /* only call mac80211 completion if the stop was initiated by FW */
517 ieee80211_sched_scan_stopped(mvm->hw); 575 if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
576 mvm->scan_status = IWL_MVM_SCAN_NONE;
577 ieee80211_sched_scan_stopped(mvm->hw);
578 }
518 579
519 return 0; 580 return 0;
520} 581}
@@ -545,14 +606,9 @@ static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
545static void iwl_build_scan_cmd(struct iwl_mvm *mvm, 606static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
546 struct ieee80211_vif *vif, 607 struct ieee80211_vif *vif,
547 struct cfg80211_sched_scan_request *req, 608 struct cfg80211_sched_scan_request *req,
548 struct iwl_scan_offload_cmd *scan) 609 struct iwl_scan_offload_cmd *scan,
610 struct iwl_mvm_scan_params *params)
549{ 611{
550 bool is_assoc = false;
551
552 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
553 IEEE80211_IFACE_ITER_NORMAL,
554 iwl_mvm_vif_assoc_iterator,
555 &is_assoc);
556 scan->channel_count = 612 scan->channel_count =
557 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + 613 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
558 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 614 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -560,13 +616,17 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
560 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); 616 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
561 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; 617 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
562 scan->rx_chain = iwl_mvm_scan_rx_chain(mvm); 618 scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
563 scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags, 619
564 is_assoc); 620 scan->max_out_time = cpu_to_le32(params->max_out_time);
565 scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc); 621 scan->suspend_time = cpu_to_le32(params->suspend_time);
622
566 scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP | 623 scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
567 MAC_FILTER_IN_BEACON); 624 MAC_FILTER_IN_BEACON);
568 scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND); 625 scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
569 scan->rep_count = cpu_to_le32(1); 626 scan->rep_count = cpu_to_le32(1);
627
628 if (params->passive_fragmented)
629 scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
570} 630}
571 631
572static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) 632static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -596,6 +656,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
596 * config match list. 656 * config match list.
597 */ 657 */
598 for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) { 658 for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
659 /* skip empty SSID matchsets */
660 if (!req->match_sets[i].ssid.ssid_len)
661 continue;
599 scan->direct_scan[i].id = WLAN_EID_SSID; 662 scan->direct_scan[i].id = WLAN_EID_SSID;
600 scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len; 663 scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
601 memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid, 664 memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
@@ -628,12 +691,11 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
628 struct iwl_scan_channel_cfg *channels, 691 struct iwl_scan_channel_cfg *channels,
629 enum ieee80211_band band, 692 enum ieee80211_band band,
630 int *head, int *tail, 693 int *head, int *tail,
631 u32 ssid_bitmap) 694 u32 ssid_bitmap,
695 struct iwl_mvm_scan_params *params)
632{ 696{
633 struct ieee80211_supported_band *s_band; 697 struct ieee80211_supported_band *s_band;
634 int n_probes = req->n_ssids;
635 int n_channels = req->n_channels; 698 int n_channels = req->n_channels;
636 u8 active_dwell, passive_dwell;
637 int i, j, index = 0; 699 int i, j, index = 0;
638 bool partial; 700 bool partial;
639 701
@@ -643,8 +705,6 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
643 * to scan. So add requested channels to head of the list and others to 705 * to scan. So add requested channels to head of the list and others to
644 * the end. 706 * the end.
645 */ 707 */
646 active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
647 passive_dwell = iwl_mvm_get_passive_dwell(band);
648 s_band = &mvm->nvm_data->bands[band]; 708 s_band = &mvm->nvm_data->bands[band];
649 709
650 for (i = 0; i < s_band->n_channels && *head <= *tail; i++) { 710 for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
@@ -668,8 +728,8 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
668 channels->channel_number[index] = 728 channels->channel_number[index] =
669 cpu_to_le16(ieee80211_frequency_to_channel( 729 cpu_to_le16(ieee80211_frequency_to_channel(
670 s_band->channels[i].center_freq)); 730 s_band->channels[i].center_freq));
671 channels->dwell_time[index][0] = active_dwell; 731 channels->dwell_time[index][0] = params->dwell[band].active;
672 channels->dwell_time[index][1] = passive_dwell; 732 channels->dwell_time[index][1] = params->dwell[band].passive;
673 733
674 channels->iter_count[index] = cpu_to_le16(1); 734 channels->iter_count[index] = cpu_to_le16(1);
675 channels->iter_interval[index] = 0; 735 channels->iter_interval[index] = 0;
@@ -698,7 +758,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
698 struct cfg80211_sched_scan_request *req, 758 struct cfg80211_sched_scan_request *req,
699 struct ieee80211_sched_scan_ies *ies) 759 struct ieee80211_sched_scan_ies *ies)
700{ 760{
701 int supported_bands = 0;
702 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 761 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
703 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 762 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
704 int head = 0; 763 int head = 0;
@@ -712,22 +771,19 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
712 .id = SCAN_OFFLOAD_CONFIG_CMD, 771 .id = SCAN_OFFLOAD_CONFIG_CMD,
713 .flags = CMD_SYNC, 772 .flags = CMD_SYNC,
714 }; 773 };
774 struct iwl_mvm_scan_params params = {};
715 775
716 lockdep_assert_held(&mvm->mutex); 776 lockdep_assert_held(&mvm->mutex);
717 777
718 if (band_2ghz)
719 supported_bands++;
720 if (band_5ghz)
721 supported_bands++;
722
723 cmd_len = sizeof(struct iwl_scan_offload_cfg) + 778 cmd_len = sizeof(struct iwl_scan_offload_cfg) +
724 supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE; 779 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
725 780
726 scan_cfg = kzalloc(cmd_len, GFP_KERNEL); 781 scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
727 if (!scan_cfg) 782 if (!scan_cfg)
728 return -ENOMEM; 783 return -ENOMEM;
729 784
730 iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd); 785 iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
786 iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
731 scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len); 787 scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
732 788
733 iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap); 789 iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
@@ -739,7 +795,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
739 scan_cfg->data); 795 scan_cfg->data);
740 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 796 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
741 IEEE80211_BAND_2GHZ, &head, &tail, 797 IEEE80211_BAND_2GHZ, &head, &tail,
742 ssid_bitmap); 798 ssid_bitmap, &params);
743 } 799 }
744 if (band_5ghz) { 800 if (band_5ghz) {
745 iwl_scan_offload_build_tx_cmd(mvm, vif, ies, 801 iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
@@ -749,7 +805,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
749 SCAN_OFFLOAD_PROBE_REQ_SIZE); 805 SCAN_OFFLOAD_PROBE_REQ_SIZE);
750 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 806 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
751 IEEE80211_BAND_5GHZ, &head, &tail, 807 IEEE80211_BAND_5GHZ, &head, &tail,
752 ssid_bitmap); 808 ssid_bitmap, &params);
753 } 809 }
754 810
755 cmd.data[0] = scan_cfg; 811 cmd.data[0] = scan_cfg;
@@ -889,26 +945,49 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
889 * microcode has notified us that a scan is completed. 945 * microcode has notified us that a scan is completed.
890 */ 946 */
891 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status); 947 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
892 ret = -EIO; 948 ret = -ENOENT;
893 } 949 }
894 950
895 return ret; 951 return ret;
896} 952}
897 953
898void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm) 954int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
899{ 955{
900 int ret; 956 int ret;
957 struct iwl_notification_wait wait_scan_done;
958 static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
901 959
902 lockdep_assert_held(&mvm->mutex); 960 lockdep_assert_held(&mvm->mutex);
903 961
904 if (mvm->scan_status != IWL_MVM_SCAN_SCHED) { 962 if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
905 IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n"); 963 IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
906 return; 964 return 0;
907 } 965 }
908 966
967 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
968 scan_done_notif,
969 ARRAY_SIZE(scan_done_notif),
970 NULL, NULL);
971
909 ret = iwl_mvm_send_sched_scan_abort(mvm); 972 ret = iwl_mvm_send_sched_scan_abort(mvm);
910 if (ret) 973 if (ret) {
911 IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret); 974 IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
912 else 975 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
913 IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n"); 976 return ret;
977 }
978
979 IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
980
981 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
982 if (ret)
983 return ret;
984
985 /*
986 * Clear the scan status so the next scan requests will succeed. This
987 * also ensures the Rx handler doesn't do anything, as the scan was
988 * stopped from above.
989 */
990 mvm->scan_status = IWL_MVM_SCAN_NONE;
991
992 return 0;
914} 993}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 3397f59cd4e4..f339ef884250 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,27 +66,27 @@
66#include "sta.h" 66#include "sta.h"
67#include "rs.h" 67#include "rs.h"
68 68
69static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6, 69static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
70 struct iwl_mvm_add_sta_cmd_v5 *cmd_v5) 70 struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
71{ 71{
72 memset(cmd_v5, 0, sizeof(*cmd_v5)); 72 memset(cmd_v5, 0, sizeof(*cmd_v5));
73 73
74 cmd_v5->add_modify = cmd_v6->add_modify; 74 cmd_v5->add_modify = cmd_v7->add_modify;
75 cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx; 75 cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
76 cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color; 76 cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
77 memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN); 77 memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
78 cmd_v5->sta_id = cmd_v6->sta_id; 78 cmd_v5->sta_id = cmd_v7->sta_id;
79 cmd_v5->modify_mask = cmd_v6->modify_mask; 79 cmd_v5->modify_mask = cmd_v7->modify_mask;
80 cmd_v5->station_flags = cmd_v6->station_flags; 80 cmd_v5->station_flags = cmd_v7->station_flags;
81 cmd_v5->station_flags_msk = cmd_v6->station_flags_msk; 81 cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
82 cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid; 82 cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
83 cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid; 83 cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
84 cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn; 84 cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
85 cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count; 85 cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
86 cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags; 86 cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
87 cmd_v5->assoc_id = cmd_v6->assoc_id; 87 cmd_v5->assoc_id = cmd_v7->assoc_id;
88 cmd_v5->beamform_flags = cmd_v6->beamform_flags; 88 cmd_v5->beamform_flags = cmd_v7->beamform_flags;
89 cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk; 89 cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
90} 90}
91 91
92static void 92static void
@@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
110} 110}
111 111
112static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm, 112static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
113 struct iwl_mvm_add_sta_cmd_v6 *cmd, 113 struct iwl_mvm_add_sta_cmd_v7 *cmd,
114 int *status) 114 int *status)
115{ 115{
116 struct iwl_mvm_add_sta_cmd_v5 cmd_v5; 116 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
119 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd), 119 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
120 cmd, status); 120 cmd, status);
121 121
122 iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5); 122 iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
123 123
124 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5), 124 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
125 &cmd_v5, status); 125 &cmd_v5, status);
126} 126}
127 127
128static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags, 128static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
129 struct iwl_mvm_add_sta_cmd_v6 *cmd) 129 struct iwl_mvm_add_sta_cmd_v7 *cmd)
130{ 130{
131 struct iwl_mvm_add_sta_cmd_v5 cmd_v5; 131 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
132 132
@@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
134 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, 134 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
135 sizeof(*cmd), cmd); 135 sizeof(*cmd), cmd);
136 136
137 iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5); 137 iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
138 138
139 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5), 139 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
140 &cmd_v5); 140 &cmd_v5);
@@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
175 &sta_cmd); 175 &sta_cmd);
176} 176}
177 177
178static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm) 178static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
179 enum nl80211_iftype iftype)
179{ 180{
180 int sta_id; 181 int sta_id;
182 u32 reserved_ids = 0;
181 183
184 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
182 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); 185 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
183 186
184 lockdep_assert_held(&mvm->mutex); 187 lockdep_assert_held(&mvm->mutex);
185 188
189 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
190 if (iftype != NL80211_IFTYPE_STATION)
191 reserved_ids = BIT(0);
192
186 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ 193 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
187 for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) 194 for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
195 if (BIT(sta_id) & reserved_ids)
196 continue;
197
188 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 198 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
189 lockdep_is_held(&mvm->mutex))) 199 lockdep_is_held(&mvm->mutex)))
190 return sta_id; 200 return sta_id;
201 }
191 return IWL_MVM_STATION_COUNT; 202 return IWL_MVM_STATION_COUNT;
192} 203}
193 204
@@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
196 bool update) 207 bool update)
197{ 208{
198 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 209 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
199 struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd; 210 struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
200 int ret; 211 int ret;
201 u32 status; 212 u32 status;
202 u32 agg_size = 0, mpdu_dens = 0; 213 u32 agg_size = 0, mpdu_dens = 0;
@@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
312 lockdep_assert_held(&mvm->mutex); 323 lockdep_assert_held(&mvm->mutex);
313 324
314 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 325 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
315 sta_id = iwl_mvm_find_free_sta_id(mvm); 326 sta_id = iwl_mvm_find_free_sta_id(mvm,
327 ieee80211_vif_type_p2p(vif));
316 else 328 else
317 sta_id = mvm_sta->sta_id; 329 sta_id = mvm_sta->sta_id;
318 330
@@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
368int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 380int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
369 bool drain) 381 bool drain)
370{ 382{
371 struct iwl_mvm_add_sta_cmd_v6 cmd = {}; 383 struct iwl_mvm_add_sta_cmd_v7 cmd = {};
372 int ret; 384 int ret;
373 u32 status; 385 u32 status;
374 386
@@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
522 534
523 /* unassoc - go ahead - remove the AP STA now */ 535 /* unassoc - go ahead - remove the AP STA now */
524 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 536 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
537
538 /* clear d0i3_ap_sta_id if no longer relevant */
539 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
540 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
525 } 541 }
526 542
527 /* 543 /*
@@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
560} 576}
561 577
562int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, 578int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
563 u32 qmask) 579 u32 qmask, enum nl80211_iftype iftype)
564{ 580{
565 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 581 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
566 sta->sta_id = iwl_mvm_find_free_sta_id(mvm); 582 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
567 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) 583 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
568 return -ENOSPC; 584 return -ENOSPC;
569 } 585 }
@@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
587 const u8 *addr, 603 const u8 *addr,
588 u16 mac_id, u16 color) 604 u16 mac_id, u16 color)
589{ 605{
590 struct iwl_mvm_add_sta_cmd_v6 cmd; 606 struct iwl_mvm_add_sta_cmd_v7 cmd;
591 int ret; 607 int ret;
592 u32 status; 608 u32 status;
593 609
594 lockdep_assert_held(&mvm->mutex); 610 lockdep_assert_held(&mvm->mutex);
595 611
596 memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6)); 612 memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
597 cmd.sta_id = sta->sta_id; 613 cmd.sta_id = sta->sta_id;
598 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, 614 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
599 color)); 615 color));
@@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
627 lockdep_assert_held(&mvm->mutex); 643 lockdep_assert_held(&mvm->mutex);
628 644
629 /* Add the aux station, but without any queues */ 645 /* Add the aux station, but without any queues */
630 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0); 646 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
647 NL80211_IFTYPE_UNSPECIFIED);
631 if (ret) 648 if (ret)
632 return ret; 649 return ret;
633 650
@@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
699 lockdep_assert_held(&mvm->mutex); 716 lockdep_assert_held(&mvm->mutex);
700 717
701 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); 718 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
702 ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask); 719 ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
720 ieee80211_vif_type_p2p(vif));
703 if (ret) 721 if (ret)
704 return ret; 722 return ret;
705 723
@@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
735 int tid, u16 ssn, bool start) 753 int tid, u16 ssn, bool start)
736{ 754{
737 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 755 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
738 struct iwl_mvm_add_sta_cmd_v6 cmd = {}; 756 struct iwl_mvm_add_sta_cmd_v7 cmd = {};
739 int ret; 757 int ret;
740 u32 status; 758 u32 status;
741 759
@@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
794 int tid, u8 queue, bool start) 812 int tid, u8 queue, bool start)
795{ 813{
796 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 814 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
797 struct iwl_mvm_add_sta_cmd_v6 cmd = {}; 815 struct iwl_mvm_add_sta_cmd_v7 cmd = {};
798 int ret; 816 int ret;
799 u32 status; 817 u32 status;
800 818
@@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
833 return ret; 851 return ret;
834} 852}
835 853
836static const u8 tid_to_ac[] = { 854const u8 tid_to_mac80211_ac[] = {
837 IEEE80211_AC_BE, 855 IEEE80211_AC_BE,
838 IEEE80211_AC_BK, 856 IEEE80211_AC_BK,
839 IEEE80211_AC_BK, 857 IEEE80211_AC_BK,
@@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
844 IEEE80211_AC_VO, 862 IEEE80211_AC_VO,
845}; 863};
846 864
865static const u8 tid_to_ucode_ac[] = {
866 AC_BE,
867 AC_BK,
868 AC_BK,
869 AC_BE,
870 AC_VI,
871 AC_VI,
872 AC_VO,
873 AC_VO,
874};
875
847int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 876int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
848 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 877 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
849{ 878{
@@ -873,10 +902,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
873 return -EIO; 902 return -EIO;
874 } 903 }
875 904
905 spin_lock_bh(&mvmsta->lock);
906
907 /* possible race condition - we entered D0i3 while starting agg */
908 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
909 spin_unlock_bh(&mvmsta->lock);
910 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
911 return -EIO;
912 }
913
876 /* the new tx queue is still connected to the same mac80211 queue */ 914 /* the new tx queue is still connected to the same mac80211 queue */
877 mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]]; 915 mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
878 916
879 spin_lock_bh(&mvmsta->lock);
880 tid_data = &mvmsta->tid_data[tid]; 917 tid_data = &mvmsta->tid_data[tid];
881 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 918 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
882 tid_data->txq_id = txq_id; 919 tid_data->txq_id = txq_id;
@@ -916,7 +953,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
916 tid_data->ssn = 0xffff; 953 tid_data->ssn = 0xffff;
917 spin_unlock_bh(&mvmsta->lock); 954 spin_unlock_bh(&mvmsta->lock);
918 955
919 fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]]; 956 fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
920 957
921 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 958 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
922 if (ret) 959 if (ret)
@@ -1411,7 +1448,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1411 struct ieee80211_sta *sta) 1448 struct ieee80211_sta *sta)
1412{ 1449{
1413 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1450 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1414 struct iwl_mvm_add_sta_cmd_v6 cmd = { 1451 struct iwl_mvm_add_sta_cmd_v7 cmd = {
1415 .add_modify = STA_MODE_MODIFY, 1452 .add_modify = STA_MODE_MODIFY,
1416 .sta_id = mvmsta->sta_id, 1453 .sta_id = mvmsta->sta_id,
1417 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 1454 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1427,28 +1464,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1427void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, 1464void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1428 struct ieee80211_sta *sta, 1465 struct ieee80211_sta *sta,
1429 enum ieee80211_frame_release_type reason, 1466 enum ieee80211_frame_release_type reason,
1430 u16 cnt) 1467 u16 cnt, u16 tids, bool more_data,
1468 bool agg)
1431{ 1469{
1432 u16 sleep_state_flags =
1433 (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
1434 STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
1435 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1470 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1436 struct iwl_mvm_add_sta_cmd_v6 cmd = { 1471 struct iwl_mvm_add_sta_cmd_v7 cmd = {
1437 .add_modify = STA_MODE_MODIFY, 1472 .add_modify = STA_MODE_MODIFY,
1438 .sta_id = mvmsta->sta_id, 1473 .sta_id = mvmsta->sta_id,
1439 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 1474 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
1440 .sleep_tx_count = cpu_to_le16(cnt), 1475 .sleep_tx_count = cpu_to_le16(cnt),
1441 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 1476 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1442 /*
1443 * Same modify mask for sleep_tx_count and sleep_state_flags so
1444 * we must set the sleep_state_flags too.
1445 */
1446 .sleep_state_flags = cpu_to_le16(sleep_state_flags),
1447 }; 1477 };
1448 int ret; 1478 int tid, ret;
1479 unsigned long _tids = tids;
1480
1481 /* convert TIDs to ACs - we don't support TSPEC so that's OK
1482 * Note that this field is reserved and unused by firmware not
1483 * supporting GO uAPSD, so it's safe to always do this.
1484 */
1485 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
1486 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
1487
1488 /* If we're releasing frames from aggregation queues then check if the
1489 * all queues combined that we're releasing frames from have
1490 * - more frames than the service period, in which case more_data
1491 * needs to be set
1492 * - fewer than 'cnt' frames, in which case we need to adjust the
1493 * firmware command (but do that unconditionally)
1494 */
1495 if (agg) {
1496 int remaining = cnt;
1497
1498 spin_lock_bh(&mvmsta->lock);
1499 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
1500 struct iwl_mvm_tid_data *tid_data;
1501 u16 n_queued;
1502
1503 tid_data = &mvmsta->tid_data[tid];
1504 if (WARN(tid_data->state != IWL_AGG_ON &&
1505 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
1506 "TID %d state is %d\n",
1507 tid, tid_data->state)) {
1508 spin_unlock_bh(&mvmsta->lock);
1509 ieee80211_sta_eosp(sta);
1510 return;
1511 }
1512
1513 n_queued = iwl_mvm_tid_queued(tid_data);
1514 if (n_queued > remaining) {
1515 more_data = true;
1516 remaining = 0;
1517 break;
1518 }
1519 remaining -= n_queued;
1520 }
1521 spin_unlock_bh(&mvmsta->lock);
1522
1523 cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
1524 if (WARN_ON(cnt - remaining == 0)) {
1525 ieee80211_sta_eosp(sta);
1526 return;
1527 }
1528 }
1529
1530 /* Note: this is ignored by firmware not supporting GO uAPSD */
1531 if (more_data)
1532 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
1533
1534 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
1535 mvmsta->next_status_eosp = true;
1536 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
1537 } else {
1538 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
1539 }
1449 1540
1450 /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
1451 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd); 1541 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
1452 if (ret) 1542 if (ret)
1453 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1543 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1454} 1544}
1545
1546int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
1547 struct iwl_rx_cmd_buffer *rxb,
1548 struct iwl_device_cmd *cmd)
1549{
1550 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1551 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
1552 struct ieee80211_sta *sta;
1553 u32 sta_id = le32_to_cpu(notif->sta_id);
1554
1555 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
1556 return 0;
1557
1558 rcu_read_lock();
1559 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1560 if (!IS_ERR_OR_NULL(sta))
1561 ieee80211_sta_eosp(sta);
1562 rcu_read_unlock();
1563
1564 return 0;
1565}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4968d0237dc5..2ed84c421481 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -195,24 +195,33 @@ struct iwl_mvm;
195/** 195/**
196 * DOC: AP mode - PS 196 * DOC: AP mode - PS
197 * 197 *
198 * When a station is asleep, the fw will set it as "asleep". All the 198 * When a station is asleep, the fw will set it as "asleep". All frames on
199 * non-aggregation frames to that station will be dropped by the fw 199 * shared queues (i.e. non-aggregation queues) to that station will be dropped
200 * (%TX_STATUS_FAIL_DEST_PS failure code). 200 * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
201 *
201 * AMPDUs are in a separate queue that is stopped by the fw. We just need to 202 * AMPDUs are in a separate queue that is stopped by the fw. We just need to
202 * let mac80211 know how many frames we have in these queues so that it can 203 * let mac80211 know when there are frames in these queues so that it can
203 * properly handle trigger frames. 204 * properly handle trigger frames.
204 * When the a trigger frame is received, mac80211 tells the driver to send 205 *
205 * frames from the AMPDU queues or AC queue depending on which queue are 206 * When a trigger frame is received, mac80211 tells the driver to send frames
206 * delivery-enabled and what TID has frames to transmit (Note that mac80211 has 207 * from the AMPDU queues or sends frames to non-aggregation queues itself,
207 * all the knowledege since all the non-agg frames are buffered / filtered, and 208 * depending on which ACs are delivery-enabled and what TID has frames to
208 * the driver tells mac80211 about agg frames). The driver needs to tell the fw 209 * transmit. Note that mac80211 has all the knowledege since all the non-agg
209 * to let frames out even if the station is asleep. This is done by 210 * frames are buffered / filtered, and the driver tells mac80211 about agg
210 * %iwl_mvm_sta_modify_sleep_tx_count. 211 * frames). The driver needs to tell the fw to let frames out even if the
211 * When we receive a frame from that station with PM bit unset, the 212 * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
212 * driver needs to let the fw know that this station isn't alseep any more. 213 *
213 * This is done by %iwl_mvm_sta_modify_ps_wake. 214 * When we receive a frame from that station with PM bit unset, the driver
214 * 215 * needs to let the fw know that this station isn't asleep any more. This is
215 * TODO - EOSP handling 216 * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
217 * station's wakeup.
218 *
219 * For a GO, the Service Period might be cut short due to an absence period
220 * of the GO. In this (and all other cases) the firmware notifies us with the
221 * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
222 * already sent to the device will be rejected again.
223 *
224 * See also "AP support for powersaving clients" in mac80211.h.
216 */ 225 */
217 226
218/** 227/**
@@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
261 u16 ssn; 270 u16 ssn;
262}; 271};
263 272
273static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
274{
275 return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
276 tid_data->next_reclaimed);
277}
278
264/** 279/**
265 * struct iwl_mvm_sta - representation of a station in the driver 280 * struct iwl_mvm_sta - representation of a station in the driver
266 * @sta_id: the index of the station in the fw (will be replaced by id_n_color) 281 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
269 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for 284 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
270 * tid. 285 * tid.
271 * @max_agg_bufsize: the maximal size of the AGG buffer for this station 286 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
287 * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
288 * by debugfs.
272 * @bt_reduced_txpower: is reduced tx power enabled for this station 289 * @bt_reduced_txpower: is reduced tx power enabled for this station
290 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
291 * we need to signal the EOSP
273 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx 292 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
274 * and from Tx response flow, it needs a spinlock. 293 * and from Tx response flow, it needs a spinlock.
275 * @tid_data: per tid data. Look at %iwl_mvm_tid_data. 294 * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@@ -287,7 +306,9 @@ struct iwl_mvm_sta {
287 u32 mac_id_n_color; 306 u32 mac_id_n_color;
288 u16 tid_disable_agg; 307 u16 tid_disable_agg;
289 u8 max_agg_bufsize; 308 u8 max_agg_bufsize;
309 bool bt_reduced_txpower_dbg;
290 bool bt_reduced_txpower; 310 bool bt_reduced_txpower;
311 bool next_status_eosp;
291 spinlock_t lock; 312 spinlock_t lock;
292 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; 313 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
293 struct iwl_lq_sta lq_sta; 314 struct iwl_lq_sta lq_sta;
@@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
345 struct ieee80211_sta *sta, u32 iv32, 366 struct ieee80211_sta *sta, u32 iv32,
346 u16 *phase1key); 367 u16 *phase1key);
347 368
369int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
370 struct iwl_rx_cmd_buffer *rxb,
371 struct iwl_device_cmd *cmd);
372
348/* AMPDU */ 373/* AMPDU */
349int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 374int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
350 int tid, u16 ssn, bool start); 375 int tid, u16 ssn, bool start);
@@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
359 384
360int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); 385int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
361int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, 386int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
362 u32 qmask); 387 u32 qmask, enum nl80211_iftype iftype);
363void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, 388void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
364 struct iwl_mvm_int_sta *sta); 389 struct iwl_mvm_int_sta *sta);
365int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 390int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
375void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, 400void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
376 struct ieee80211_sta *sta, 401 struct ieee80211_sta *sta,
377 enum ieee80211_frame_release_type reason, 402 enum ieee80211_frame_release_type reason,
378 u16 cnt); 403 u16 cnt, u16 tids, bool more_data,
404 bool agg);
379int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 405int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
380 bool drain); 406 bool drain);
381 407
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b4c2abaa297b..61331245ad93 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
126 * in iwl_mvm_te_handle_notif). 126 * in iwl_mvm_te_handle_notif).
127 */ 127 */
128 clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); 128 clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
129 iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
129 130
130 /* 131 /*
131 * Of course, our status bit is just as racy as mac80211, so in 132 * Of course, our status bit is just as racy as mac80211, so in
@@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
210 211
211 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { 212 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
212 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); 213 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
214 iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
213 ieee80211_ready_on_channel(mvm->hw); 215 ieee80211_ready_on_channel(mvm->hw);
214 } 216 }
215 } else { 217 } else {
@@ -436,7 +438,8 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
436 time_cmd.duration = cpu_to_le32(duration); 438 time_cmd.duration = cpu_to_le32(duration);
437 time_cmd.repeat = 1; 439 time_cmd.repeat = 1;
438 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 440 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
439 TE_V2_NOTIF_HOST_EVENT_END); 441 TE_V2_NOTIF_HOST_EVENT_END |
442 T2_V2_START_IMMEDIATELY);
440 443
441 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 444 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
442} 445}
@@ -551,7 +554,8 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
551 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); 554 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
552 time_cmd.repeat = 1; 555 time_cmd.repeat = 1;
553 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 556 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
554 TE_V2_NOTIF_HOST_EVENT_END); 557 TE_V2_NOTIF_HOST_EVENT_END |
558 T2_V2_START_IMMEDIATELY);
555 559
556 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 560 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
557} 561}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 3afa6b6bf835..7a99fa361954 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
403 } 403 }
404} 404}
405 405
406static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) 406void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
407{ 407{
408 struct iwl_host_cmd cmd = { 408 struct iwl_host_cmd cmd = {
409 .id = REPLY_THERMAL_MNG_BACKOFF, 409 .id = REPLY_THERMAL_MNG_BACKOFF,
@@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
412 .flags = CMD_SYNC, 412 .flags = CMD_SYNC,
413 }; 413 };
414 414
415 backoff = max(backoff, mvm->thermal_throttle.min_backoff);
416
415 if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { 417 if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
416 IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", 418 IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
417 backoff); 419 backoff);
@@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
534 .support_tx_backoff = true, 536 .support_tx_backoff = true,
535}; 537};
536 538
537void iwl_mvm_tt_initialize(struct iwl_mvm *mvm) 539void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
538{ 540{
539 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; 541 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
540 542
@@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
546 tt->params = &iwl7000_tt_params; 548 tt->params = &iwl7000_tt_params;
547 549
548 tt->throttle = false; 550 tt->throttle = false;
551 tt->min_backoff = min_backoff;
549 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); 552 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
550} 553}
551 554
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 76ee486039d7..879aeac46cc1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -79,6 +79,7 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
79 __le16 fc = hdr->frame_control; 79 __le16 fc = hdr->frame_control;
80 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); 80 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
81 u32 len = skb->len + FCS_LEN; 81 u32 len = skb->len + FCS_LEN;
82 u8 ac;
82 83
83 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 84 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
84 tx_flags |= TX_CMD_FLG_ACK; 85 tx_flags |= TX_CMD_FLG_ACK;
@@ -90,13 +91,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
90 else if (ieee80211_is_back_req(fc)) 91 else if (ieee80211_is_back_req(fc))
91 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; 92 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
92 93
93 /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
94 if (info->band == IEEE80211_BAND_2GHZ &&
95 (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
96 is_multicast_ether_addr(hdr->addr1) ||
97 ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
98 tx_flags |= TX_CMD_FLG_BT_DIS;
99
100 if (ieee80211_has_morefrags(fc)) 94 if (ieee80211_has_morefrags(fc))
101 tx_flags |= TX_CMD_FLG_MORE_FRAG; 95 tx_flags |= TX_CMD_FLG_MORE_FRAG;
102 96
@@ -112,6 +106,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
112 tx_flags &= ~TX_CMD_FLG_SEQ_CTL; 106 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
113 } 107 }
114 108
109 /* tid_tspec will default to 0 = BE when QOS isn't enabled */
110 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
111 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
112 TX_CMD_FLG_BT_PRIO_POS;
113
115 if (ieee80211_is_mgmt(fc)) { 114 if (ieee80211_is_mgmt(fc)) {
116 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 115 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
117 tx_cmd->pm_frame_timeout = cpu_to_le16(3); 116 tx_cmd->pm_frame_timeout = cpu_to_le16(3);
@@ -122,15 +121,12 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
122 * it 121 * it
123 */ 122 */
124 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); 123 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
125 } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { 124 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
126 tx_cmd->pm_frame_timeout = cpu_to_le16(2); 125 tx_cmd->pm_frame_timeout = cpu_to_le16(2);
127 } else { 126 } else {
128 tx_cmd->pm_frame_timeout = 0; 127 tx_cmd->pm_frame_timeout = 0;
129 } 128 }
130 129
131 if (info->flags & IEEE80211_TX_CTL_AMPDU)
132 tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
133
134 if (ieee80211_is_data(fc) && len > mvm->rts_threshold && 130 if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
135 !is_multicast_ether_addr(ieee80211_get_DA(hdr))) 131 !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
136 tx_flags |= TX_CMD_FLG_PROT_REQUIRE; 132 tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
@@ -207,7 +203,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
207 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); 203 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
208 204
209 mvm->mgmt_last_antenna_idx = 205 mvm->mgmt_last_antenna_idx =
210 iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw), 206 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
211 mvm->mgmt_last_antenna_idx); 207 mvm->mgmt_last_antenna_idx);
212 rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; 208 rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
213 209
@@ -377,6 +373,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
377 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 373 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
378 /* From now on, we cannot access info->control */ 374 /* From now on, we cannot access info->control */
379 375
376 /*
377 * we handle that entirely ourselves -- for uAPSD the firmware
378 * will always send a notification, and for PS-Poll responses
379 * we'll notify mac80211 when getting frame status
380 */
381 info->flags &= ~IEEE80211_TX_STATUS_EOSP;
382
380 spin_lock(&mvmsta->lock); 383 spin_lock(&mvmsta->lock);
381 384
382 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { 385 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@@ -437,6 +440,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
437 440
438 lockdep_assert_held(&mvmsta->lock); 441 lockdep_assert_held(&mvmsta->lock);
439 442
443 if ((tid_data->state == IWL_AGG_ON ||
444 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
445 iwl_mvm_tid_queued(tid_data) == 0) {
446 /*
447 * Now that this aggregation queue is empty tell mac80211 so it
448 * knows we no longer have frames buffered for the station on
449 * this TID (for the TIM bitmap calculation.)
450 */
451 ieee80211_sta_set_buffered(sta, tid, false);
452 }
453
440 if (tid_data->ssn != tid_data->next_reclaimed) 454 if (tid_data->ssn != tid_data->next_reclaimed)
441 return; 455 return;
442 456
@@ -680,6 +694,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
680 iwl_mvm_check_ratid_empty(mvm, sta, tid); 694 iwl_mvm_check_ratid_empty(mvm, sta, tid);
681 spin_unlock_bh(&mvmsta->lock); 695 spin_unlock_bh(&mvmsta->lock);
682 } 696 }
697
698 if (mvmsta->next_status_eosp) {
699 mvmsta->next_status_eosp = false;
700 ieee80211_sta_eosp(sta);
701 }
683 } else { 702 } else {
684 mvmsta = NULL; 703 mvmsta = NULL;
685 } 704 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 86989df69356..d619851745a1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -289,8 +289,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
289 return last_idx; 289 return last_idx;
290} 290}
291 291
292static struct { 292static const struct {
293 char *name; 293 const char *name;
294 u8 num; 294 u8 num;
295} advanced_lookup[] = { 295} advanced_lookup[] = {
296 { "NMI_INTERRUPT_WDG", 0x34 }, 296 { "NMI_INTERRUPT_WDG", 0x34 },
@@ -376,9 +376,67 @@ struct iwl_error_event_table {
376 u32 flow_handler; /* FH read/write pointers, RX credit */ 376 u32 flow_handler; /* FH read/write pointers, RX credit */
377} __packed; 377} __packed;
378 378
379/*
380 * UMAC error struct - relevant starting from family 8000 chip.
381 * Note: This structure is read from the device with IO accesses,
382 * and the reading already does the endian conversion. As it is
383 * read with u32-sized accesses, any members with a different size
384 * need to be ordered correctly though!
385 */
386struct iwl_umac_error_event_table {
387 u32 valid; /* (nonzero) valid, (0) log is empty */
388 u32 error_id; /* type of error */
389 u32 pc; /* program counter */
390 u32 blink1; /* branch link */
391 u32 blink2; /* branch link */
392 u32 ilink1; /* interrupt link */
393 u32 ilink2; /* interrupt link */
394 u32 data1; /* error-specific data */
395 u32 data2; /* error-specific data */
396 u32 line; /* source code line of error */
397 u32 umac_ver; /* umac version */
398} __packed;
399
379#define ERROR_START_OFFSET (1 * sizeof(u32)) 400#define ERROR_START_OFFSET (1 * sizeof(u32))
380#define ERROR_ELEM_SIZE (7 * sizeof(u32)) 401#define ERROR_ELEM_SIZE (7 * sizeof(u32))
381 402
403static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
404{
405 struct iwl_trans *trans = mvm->trans;
406 struct iwl_umac_error_event_table table;
407 u32 base;
408
409 base = mvm->umac_error_event_table;
410
411 if (base < 0x800000 || base >= 0x80C000) {
412 IWL_ERR(mvm,
413 "Not valid error log pointer 0x%08X for %s uCode\n",
414 base,
415 (mvm->cur_ucode == IWL_UCODE_INIT)
416 ? "Init" : "RT");
417 return;
418 }
419
420 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
421
422 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
423 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
424 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
425 mvm->status, table.valid);
426 }
427
428 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
429 desc_lookup(table.error_id));
430 IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
431 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
432 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
433 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
434 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
435 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
436 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
437 IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
438}
439
382void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) 440void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
383{ 441{
384 struct iwl_trans *trans = mvm->trans; 442 struct iwl_trans *trans = mvm->trans;
@@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
394 base = mvm->fw->inst_errlog_ptr; 452 base = mvm->fw->inst_errlog_ptr;
395 } 453 }
396 454
397 if (base < 0x800000 || base >= 0x80C000) { 455 if (base < 0x800000) {
398 IWL_ERR(mvm, 456 IWL_ERR(mvm,
399 "Not valid error log pointer 0x%08X for %s uCode\n", 457 "Not valid error log pointer 0x%08X for %s uCode\n",
400 base, 458 base,
@@ -453,29 +511,31 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
453 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 511 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
454 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); 512 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
455 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); 513 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
514
515 if (mvm->support_umac_log)
516 iwl_mvm_dump_umac_error_log(mvm);
456} 517}
457 518
458void iwl_mvm_dump_sram(struct iwl_mvm *mvm) 519void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
459{ 520{
460 const struct fw_img *img; 521 const struct fw_img *img;
461 int ofs, len = 0; 522 u32 ofs, sram_len;
462 u8 *buf; 523 void *sram;
463 524
464 if (!mvm->ucode_loaded) 525 if (!mvm->ucode_loaded || mvm->fw_error_sram)
465 return; 526 return;
466 527
467 img = &mvm->fw->img[mvm->cur_ucode]; 528 img = &mvm->fw->img[mvm->cur_ucode];
468 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; 529 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
469 len = img->sec[IWL_UCODE_SECTION_DATA].len; 530 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
470 531
471 buf = kzalloc(len, GFP_ATOMIC); 532 sram = kzalloc(sram_len, GFP_ATOMIC);
472 if (!buf) 533 if (!sram)
473 return; 534 return;
474 535
475 iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len); 536 iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len);
476 iwl_print_hex_error(mvm->trans, buf, len); 537 mvm->fw_error_sram = sram;
477 538 mvm->fw_error_sram_len = sram_len;
478 kfree(buf);
479} 539}
480 540
481/** 541/**
@@ -516,15 +576,20 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
516 enum ieee80211_smps_mode smps_request) 576 enum ieee80211_smps_mode smps_request)
517{ 577{
518 struct iwl_mvm_vif *mvmvif; 578 struct iwl_mvm_vif *mvmvif;
519 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; 579 enum ieee80211_smps_mode smps_mode;
520 int i; 580 int i;
521 581
522 lockdep_assert_held(&mvm->mutex); 582 lockdep_assert_held(&mvm->mutex);
523 583
524 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */ 584 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
525 if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1) 585 if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
526 return; 586 return;
527 587
588 if (vif->type == NL80211_IFTYPE_AP)
589 smps_mode = IEEE80211_SMPS_OFF;
590 else
591 smps_mode = IEEE80211_SMPS_AUTOMATIC;
592
528 mvmvif = iwl_mvm_vif_from_mac80211(vif); 593 mvmvif = iwl_mvm_vif_from_mac80211(vif);
529 mvmvif->smps_requests[req_type] = smps_request; 594 mvmvif->smps_requests[req_type] = smps_request;
530 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { 595 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@@ -538,3 +603,44 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
538 603
539 ieee80211_request_smps(vif, smps_mode); 604 ieee80211_request_smps(vif, smps_mode);
540} 605}
606
607int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
608 bool value)
609{
610 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
611 int res;
612
613 lockdep_assert_held(&mvm->mutex);
614
615 if (mvmvif->low_latency == value)
616 return 0;
617
618 mvmvif->low_latency = value;
619
620 res = iwl_mvm_update_quotas(mvm, NULL);
621 if (res)
622 return res;
623
624 iwl_mvm_bt_coex_vif_change(mvm);
625
626 return iwl_mvm_power_update_mac(mvm, vif);
627}
628
629static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
630{
631 bool *result = _data;
632
633 if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
634 *result = true;
635}
636
637bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
638{
639 bool result = false;
640
641 ieee80211_iterate_active_interfaces_atomic(
642 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
643 iwl_mvm_ll_iter, &result);
644
645 return result;
646}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3872ead75488..edb015c99049 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -66,6 +66,7 @@
66#include <linux/module.h> 66#include <linux/module.h>
67#include <linux/pci.h> 67#include <linux/pci.h>
68#include <linux/pci-aspm.h> 68#include <linux/pci-aspm.h>
69#include <linux/acpi.h>
69 70
70#include "iwl-trans.h" 71#include "iwl-trans.h"
71#include "iwl-drv.h" 72#include "iwl-drv.h"
@@ -389,12 +390,92 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
389 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, 390 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
390 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, 391 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
391 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, 392 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
393
394/* 8000 Series */
395 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
396 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
392#endif /* CONFIG_IWLMVM */ 397#endif /* CONFIG_IWLMVM */
393 398
394 {0} 399 {0}
395}; 400};
396MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 401MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
397 402
403#ifdef CONFIG_ACPI
404#define SPL_METHOD "SPLC"
405#define SPL_DOMAINTYPE_MODULE BIT(0)
406#define SPL_DOMAINTYPE_WIFI BIT(1)
407#define SPL_DOMAINTYPE_WIGIG BIT(2)
408#define SPL_DOMAINTYPE_RFEM BIT(3)
409
410static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
411{
412 union acpi_object *limits, *domain_type, *power_limit;
413
414 if (splx->type != ACPI_TYPE_PACKAGE ||
415 splx->package.count != 2 ||
416 splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
417 splx->package.elements[0].integer.value != 0) {
418 IWL_ERR(trans, "Unsupported splx structure");
419 return 0;
420 }
421
422 limits = &splx->package.elements[1];
423 if (limits->type != ACPI_TYPE_PACKAGE ||
424 limits->package.count < 2 ||
425 limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
426 limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
427 IWL_ERR(trans, "Invalid limits element");
428 return 0;
429 }
430
431 domain_type = &limits->package.elements[0];
432 power_limit = &limits->package.elements[1];
433 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
434 IWL_DEBUG_INFO(trans, "WiFi power is not limited");
435 return 0;
436 }
437
438 return power_limit->integer.value;
439}
440
441static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
442{
443 acpi_handle pxsx_handle;
444 acpi_handle handle;
445 struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
446 acpi_status status;
447
448 pxsx_handle = ACPI_HANDLE(&pdev->dev);
449 if (!pxsx_handle) {
450 IWL_DEBUG_INFO(trans,
451 "Could not retrieve root port ACPI handle");
452 return;
453 }
454
455 /* Get the method's handle */
456 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
457 if (ACPI_FAILURE(status)) {
458 IWL_DEBUG_INFO(trans, "SPL method not found");
459 return;
460 }
461
462 /* Call SPLC with no arguments */
463 status = acpi_evaluate_object(handle, NULL, NULL, &splx);
464 if (ACPI_FAILURE(status)) {
465 IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
466 return;
467 }
468
469 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
470 IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
471 trans->dflt_pwr_limit);
472 kfree(splx.pointer);
473}
474
475#else /* CONFIG_ACPI */
476static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
477#endif
478
398/* PCI registers */ 479/* PCI registers */
399#define PCI_CFG_RETRY_TIMEOUT 0x041 480#define PCI_CFG_RETRY_TIMEOUT 0x041
400 481
@@ -419,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
419 goto out_free_trans; 500 goto out_free_trans;
420 } 501 }
421 502
503 set_dflt_pwr_limit(iwl_trans, pdev);
504
422 /* register transport layer debugfs here */ 505 /* register transport layer debugfs here */
423 ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir); 506 ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
424 if (ret) 507 if (ret)
@@ -477,7 +560,7 @@ static int iwl_pci_resume(struct device *device)
477 iwl_enable_rfkill_int(trans); 560 iwl_enable_rfkill_int(trans);
478 561
479 hw_rfkill = iwl_is_rfkill_set(trans); 562 hw_rfkill = iwl_is_rfkill_set(trans);
480 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 563 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
481 564
482 return 0; 565 return 0;
483} 566}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e851f26fd44c..9091513ea738 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -304,7 +304,7 @@ struct iwl_trans_pcie {
304 bool bc_table_dword; 304 bool bc_table_dword;
305 u32 rx_page_order; 305 u32 rx_page_order;
306 306
307 const char **command_names; 307 const char *const *command_names;
308 308
309 /* queue watchdog */ 309 /* queue watchdog */
310 unsigned long wd_timeout; 310 unsigned long wd_timeout;
@@ -488,4 +488,6 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
488 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); 488 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
489} 489}
490 490
491void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
492
491#endif /* __iwl_trans_int_pcie_h__ */ 493#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08c23d497a02..fdfa3969cac9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -155,37 +155,26 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
155 if (rxq->need_update == 0) 155 if (rxq->need_update == 0)
156 goto exit_unlock; 156 goto exit_unlock;
157 157
158 if (trans->cfg->base_params->shadow_reg_enable) { 158 /*
159 /* shadow register enabled */ 159 * explicitly wake up the NIC if:
160 /* Device expects a multiple of 8 */ 160 * 1. shadow registers aren't enabled
161 rxq->write_actual = (rxq->write & ~0x7); 161 * 2. there is a chance that the NIC is asleep
162 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 162 */
163 } else { 163 if (!trans->cfg->base_params->shadow_reg_enable &&
164 /* If power-saving is in use, make sure device is awake */ 164 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
165 if (test_bit(STATUS_TPOWER_PMI, &trans->status)) { 165 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
166 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 166
167 167 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
168 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 168 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
169 IWL_DEBUG_INFO(trans, 169 reg);
170 "Rx queue requesting wakeup," 170 iwl_set_bit(trans, CSR_GP_CNTRL,
171 " GP1 = 0x%x\n", reg); 171 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
172 iwl_set_bit(trans, CSR_GP_CNTRL, 172 goto exit_unlock;
173 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
174 goto exit_unlock;
175 }
176
177 rxq->write_actual = (rxq->write & ~0x7);
178 iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
179 rxq->write_actual);
180
181 /* Else device is assumed to be awake */
182 } else {
183 /* Device expects a multiple of 8 */
184 rxq->write_actual = (rxq->write & ~0x7);
185 iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
186 rxq->write_actual);
187 } 173 }
188 } 174 }
175
176 rxq->write_actual = round_down(rxq->write, 8);
177 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
189 rxq->need_update = 0; 178 rxq->need_update = 0;
190 179
191 exit_unlock: 180 exit_unlock:
@@ -802,10 +791,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
802 791
803static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) 792static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
804{ 793{
805 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
806 u32 inta; 794 u32 inta;
807 795
808 lockdep_assert_held(&trans_pcie->irq_lock); 796 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
809 797
810 trace_iwlwifi_dev_irq(trans->dev); 798 trace_iwlwifi_dev_irq(trans->dev);
811 799
@@ -1006,7 +994,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1006 994
1007 isr_stats->rfkill++; 995 isr_stats->rfkill++;
1008 996
1009 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 997 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1010 if (hw_rfkill) { 998 if (hw_rfkill) {
1011 set_bit(STATUS_RFKILL, &trans->status); 999 set_bit(STATUS_RFKILL, &trans->status);
1012 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1000 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f9507807b486..dcfd6d866d09 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,6 +75,20 @@
75#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "internal.h" 76#include "internal.h"
77 77
78static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
79{
80 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
81 ((reg & 0x0000ffff) | (2 << 28)));
82 return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
83}
84
85static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
86{
87 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
88 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
89 ((reg & 0x0000ffff) | (3 << 28)));
90}
91
78static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 92static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
79{ 93{
80 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 94 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -89,6 +103,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
89 103
90/* PCI registers */ 104/* PCI registers */
91#define PCI_CFG_RETRY_TIMEOUT 0x041 105#define PCI_CFG_RETRY_TIMEOUT 0x041
106#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
92 107
93static void iwl_pcie_apm_config(struct iwl_trans *trans) 108static void iwl_pcie_apm_config(struct iwl_trans *trans)
94{ 109{
@@ -132,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
132 */ 147 */
133 148
134 /* Disable L0S exit timer (platform NMI Work/Around) */ 149 /* Disable L0S exit timer (platform NMI Work/Around) */
135 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 150 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
136 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 151 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
152 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
137 153
138 /* 154 /*
139 * Disable L0s without affecting L1; 155 * Disable L0s without affecting L1;
@@ -203,19 +219,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
203 /* 219 /*
204 * Enable DMA clock and wait for it to stabilize. 220 * Enable DMA clock and wait for it to stabilize.
205 * 221 *
206 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits 222 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
207 * do not disable clocks. This preserves any hardware bits already 223 * bits do not disable clocks. This preserves any hardware
208 * set by default in "CLK_CTRL_REG" after reset. 224 * bits already set by default in "CLK_CTRL_REG" after reset.
209 */ 225 */
210 iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); 226 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
211 udelay(20); 227 iwl_write_prph(trans, APMG_CLK_EN_REG,
228 APMG_CLK_VAL_DMA_CLK_RQT);
229 udelay(20);
212 230
213 /* Disable L1-Active */ 231 /* Disable L1-Active */
214 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 232 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
215 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 233 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
216 234
217 /* Clear the interrupt in APMG if the NIC is in RFKILL */ 235 /* Clear the interrupt in APMG if the NIC is in RFKILL */
218 iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL); 236 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
237 APMG_RTC_INT_STT_RFKILL);
238 }
219 239
220 set_bit(STATUS_DEVICE_ENABLED, &trans->status); 240 set_bit(STATUS_DEVICE_ENABLED, &trans->status);
221 241
@@ -223,6 +243,116 @@ out:
223 return ret; 243 return ret;
224} 244}
225 245
246/*
247 * Enable LP XTAL to avoid HW bug where device may consume much power if
248 * FW is not loaded after device reset. LP XTAL is disabled by default
249 * after device HW reset. Do it only if XTAL is fed by internal source.
250 * Configure device's "persistence" mode to avoid resetting XTAL again when
251 * SHRD_HW_RST occurs in S3.
252 */
253static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
254{
255 int ret;
256 u32 apmg_gp1_reg;
257 u32 apmg_xtal_cfg_reg;
258 u32 dl_cfg_reg;
259
260 /* Force XTAL ON */
261 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
262 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
263
264 /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
265 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
266
267 udelay(10);
268
269 /*
270 * Set "initialization complete" bit to move adapter from
271 * D0U* --> D0A* (powered-up active) state.
272 */
273 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
274
275 /*
276 * Wait for clock stabilization; once stabilized, access to
277 * device-internal resources is possible.
278 */
279 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
280 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
281 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
282 25000);
283 if (WARN_ON(ret < 0)) {
284 IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
285 /* Release XTAL ON request */
286 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
287 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
288 return;
289 }
290
291 /*
292 * Clear "disable persistence" to avoid LP XTAL resetting when
293 * SHRD_HW_RST is applied in S3.
294 */
295 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
296 APMG_PCIDEV_STT_VAL_PERSIST_DIS);
297
298 /*
299 * Force APMG XTAL to be active to prevent its disabling by HW
300 * caused by APMG idle state.
301 */
302 apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
303 SHR_APMG_XTAL_CFG_REG);
304 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
305 apmg_xtal_cfg_reg |
306 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
307
308 /*
309 * Reset entire device again - do controller reset (results in
310 * SHRD_HW_RST). Turn MAC off before proceeding.
311 */
312 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
313
314 udelay(10);
315
316 /* Enable LP XTAL by indirect access through CSR */
317 apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
318 iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
319 SHR_APMG_GP1_WF_XTAL_LP_EN |
320 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
321
322 /* Clear delay line clock power up */
323 dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
324 iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
325 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
326
327 /*
328 * Enable persistence mode to avoid LP XTAL resetting when
329 * SHRD_HW_RST is applied in S3.
330 */
331 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
332 CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
333
334 /*
335 * Clear "initialization complete" bit to move adapter from
336 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
337 */
338 iwl_clear_bit(trans, CSR_GP_CNTRL,
339 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
340
341 /* Activates XTAL resources monitor */
342 __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
343 CSR_MONITOR_XTAL_RESOURCES);
344
345 /* Release XTAL ON request */
346 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
347 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
348 udelay(10);
349
350 /* Release APMG XTAL */
351 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
352 apmg_xtal_cfg_reg &
353 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
354}
355
226static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) 356static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
227{ 357{
228 int ret = 0; 358 int ret = 0;
@@ -250,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
250 /* Stop device's DMA activity */ 380 /* Stop device's DMA activity */
251 iwl_pcie_apm_stop_master(trans); 381 iwl_pcie_apm_stop_master(trans);
252 382
383 if (trans->cfg->lp_xtal_workaround) {
384 iwl_pcie_apm_lp_xtal_enable(trans);
385 return;
386 }
387
253 /* Reset the entire device */ 388 /* Reset the entire device */
254 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 389 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
255 390
@@ -273,7 +408,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
273 408
274 spin_unlock(&trans_pcie->irq_lock); 409 spin_unlock(&trans_pcie->irq_lock);
275 410
276 iwl_pcie_set_pwr(trans, false); 411 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
412 iwl_pcie_set_pwr(trans, false);
277 413
278 iwl_op_mode_nic_config(trans->op_mode); 414 iwl_op_mode_nic_config(trans->op_mode);
279 415
@@ -435,78 +571,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
435 return ret; 571 return ret;
436} 572}
437 573
438static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu) 574static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
575 const struct fw_img *image,
576 int cpu,
577 int *first_ucode_section)
439{ 578{
440 int shift_param; 579 int shift_param;
441 u32 address; 580 int i, ret = 0;
442 int ret = 0; 581 u32 last_read_idx = 0;
443 582
444 if (cpu == 1) { 583 if (cpu == 1) {
445 shift_param = 0; 584 shift_param = 0;
446 address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR; 585 *first_ucode_section = 0;
447 } else { 586 } else {
448 shift_param = 16; 587 shift_param = 16;
449 address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR; 588 (*first_ucode_section)++;
450 } 589 }
451 590
452 /* set CPU to started */ 591 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
453 iwl_trans_set_bits_mask(trans, 592 last_read_idx = i;
454 CSR_UCODE_LOAD_STATUS_ADDR,
455 CSR_CPU_STATUS_LOADING_STARTED << shift_param,
456 1);
457
458 /* set last complete descriptor number */
459 iwl_trans_set_bits_mask(trans,
460 CSR_UCODE_LOAD_STATUS_ADDR,
461 CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
462 << shift_param,
463 1);
464
465 /* set last loaded block */
466 iwl_trans_set_bits_mask(trans,
467 CSR_UCODE_LOAD_STATUS_ADDR,
468 CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
469 << shift_param,
470 1);
471 593
594 if (!image->sec[i].data ||
595 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
596 IWL_DEBUG_FW(trans,
597 "Break since Data not valid or Empty section, sec = %d\n",
598 i);
599 break;
600 }
601
602 if (i == (*first_ucode_section) + 1)
603 /* set CPU to started */
604 iwl_set_bits_prph(trans,
605 CSR_UCODE_LOAD_STATUS_ADDR,
606 LMPM_CPU_HDRS_LOADING_COMPLETED
607 << shift_param);
608
609 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
610 if (ret)
611 return ret;
612 }
472 /* image loading complete */ 613 /* image loading complete */
473 iwl_trans_set_bits_mask(trans, 614 iwl_set_bits_prph(trans,
474 CSR_UCODE_LOAD_STATUS_ADDR, 615 CSR_UCODE_LOAD_STATUS_ADDR,
475 CSR_CPU_STATUS_LOADING_COMPLETED 616 LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
476 << shift_param, 617
477 1); 618 *first_ucode_section = last_read_idx;
478 619
479 /* set FH_TCSR_0_REG */ 620 return 0;
480 iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1); 621}
481 622
482 /* verify image verification started */ 623static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
483 ret = iwl_poll_bit(trans, address, 624 const struct fw_img *image,
484 CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS, 625 int cpu,
485 CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS, 626 int *first_ucode_section)
486 CSR_SECURE_TIME_OUT); 627{
487 if (ret < 0) { 628 int shift_param;
488 IWL_ERR(trans, "secure boot process didn't start\n"); 629 int i, ret = 0;
489 return ret; 630 u32 last_read_idx = 0;
631
632 if (cpu == 1) {
633 shift_param = 0;
634 *first_ucode_section = 0;
635 } else {
636 shift_param = 16;
637 (*first_ucode_section)++;
490 } 638 }
491 639
492 /* wait for image verification to complete */ 640 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
493 ret = iwl_poll_bit(trans, address, 641 last_read_idx = i;
494 CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
495 CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
496 CSR_SECURE_TIME_OUT);
497 642
498 if (ret < 0) { 643 if (!image->sec[i].data ||
499 IWL_ERR(trans, "Time out on secure boot process\n"); 644 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
500 return ret; 645 IWL_DEBUG_FW(trans,
646 "Break since Data not valid or Empty section, sec = %d\n",
647 i);
648 break;
649 }
650
651 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
652 if (ret)
653 return ret;
501 } 654 }
502 655
656 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
657 iwl_set_bits_prph(trans,
658 CSR_UCODE_LOAD_STATUS_ADDR,
659 (LMPM_CPU_UCODE_LOADING_COMPLETED |
660 LMPM_CPU_HDRS_LOADING_COMPLETED |
661 LMPM_CPU_UCODE_LOADING_STARTED) <<
662 shift_param);
663
664 *first_ucode_section = last_read_idx;
665
503 return 0; 666 return 0;
504} 667}
505 668
506static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, 669static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
507 const struct fw_img *image) 670 const struct fw_img *image)
508{ 671{
509 int i, ret = 0; 672 int ret = 0;
673 int first_ucode_section;
510 674
511 IWL_DEBUG_FW(trans, 675 IWL_DEBUG_FW(trans,
512 "working with %s image\n", 676 "working with %s image\n",
@@ -518,53 +682,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
518 /* configure the ucode to be ready to get the secured image */ 682 /* configure the ucode to be ready to get the secured image */
519 if (image->is_secure) { 683 if (image->is_secure) {
520 /* set secure boot inspector addresses */ 684 /* set secure boot inspector addresses */
521 iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0); 685 iwl_write_prph(trans,
522 iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0); 686 LMPM_SECURE_INSPECTOR_CODE_ADDR,
523 687 LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
524 /* release CPU1 reset if secure inspector image burned in OTP */ 688
525 iwl_write32(trans, CSR_RESET, 0); 689 iwl_write_prph(trans,
526 } 690 LMPM_SECURE_INSPECTOR_DATA_ADDR,
527 691 LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
528 /* load to FW the binary sections of CPU1 */ 692
529 IWL_DEBUG_INFO(trans, "Loading CPU1\n"); 693 /* set CPU1 header address */
530 for (i = 0; 694 iwl_write_prph(trans,
531 i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU; 695 LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
532 i++) { 696 LMPM_SECURE_CPU1_HDR_MEM_SPACE);
533 if (!image->sec[i].data) 697
534 break; 698 /* load to FW the binary Secured sections of CPU1 */
535 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 699 ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
700 &first_ucode_section);
536 if (ret) 701 if (ret)
537 return ret; 702 return ret;
538 }
539 703
540 /* configure the ucode to start secure process on CPU1 */ 704 } else {
541 if (image->is_secure) { 705 /* load to FW the binary Non secured sections of CPU1 */
542 /* config CPU1 to start secure protocol */ 706 ret = iwl_pcie_load_cpu_sections(trans, image, 1,
543 ret = iwl_pcie_secure_set(trans, 1); 707 &first_ucode_section);
544 if (ret) 708 if (ret)
545 return ret; 709 return ret;
546 } else {
547 /* Remove all resets to allow NIC to operate */
548 iwl_write32(trans, CSR_RESET, 0);
549 } 710 }
550 711
551 if (image->is_dual_cpus) { 712 if (image->is_dual_cpus) {
713 /* set CPU2 header address */
714 iwl_write_prph(trans,
715 LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
716 LMPM_SECURE_CPU2_HDR_MEM_SPACE);
717
552 /* load to FW the binary sections of CPU2 */ 718 /* load to FW the binary sections of CPU2 */
553 IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n"); 719 if (image->is_secure)
554 for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU; 720 ret = iwl_pcie_load_cpu_secured_sections(
555 i < IWL_UCODE_SECTION_MAX; i++) { 721 trans, image, 2,
556 if (!image->sec[i].data) 722 &first_ucode_section);
557 break; 723 else
558 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 724 ret = iwl_pcie_load_cpu_sections(trans, image, 2,
559 if (ret) 725 &first_ucode_section);
560 return ret; 726 if (ret)
561 } 727 return ret;
728 }
729
730 /* release CPU reset */
731 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
732 iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
733 else
734 iwl_write32(trans, CSR_RESET, 0);
562 735
563 if (image->is_secure) { 736 if (image->is_secure) {
564 /* set CPU2 for secure protocol */ 737 /* wait for image verification to complete */
565 ret = iwl_pcie_secure_set(trans, 2); 738 ret = iwl_poll_prph_bit(trans,
566 if (ret) 739 LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
567 return ret; 740 LMPM_SECURE_BOOT_STATUS_SUCCESS,
741 LMPM_SECURE_BOOT_STATUS_SUCCESS,
742 LMPM_SECURE_TIME_OUT);
743
744 if (ret < 0) {
745 IWL_ERR(trans, "Time out on secure boot process\n");
746 return ret;
568 } 747 }
569 } 748 }
570 749
@@ -591,7 +770,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
591 set_bit(STATUS_RFKILL, &trans->status); 770 set_bit(STATUS_RFKILL, &trans->status);
592 else 771 else
593 clear_bit(STATUS_RFKILL, &trans->status); 772 clear_bit(STATUS_RFKILL, &trans->status);
594 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 773 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
595 if (hw_rfkill && !run_in_rfkill) 774 if (hw_rfkill && !run_in_rfkill)
596 return -ERFKILL; 775 return -ERFKILL;
597 776
@@ -706,7 +885,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
706 else 885 else
707 clear_bit(STATUS_RFKILL, &trans->status); 886 clear_bit(STATUS_RFKILL, &trans->status);
708 if (hw_rfkill != was_hw_rfkill) 887 if (hw_rfkill != was_hw_rfkill)
709 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 888 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
889}
890
891void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
892{
893 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
894 iwl_trans_pcie_stop_device(trans);
710} 895}
711 896
712static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) 897static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -815,7 +1000,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
815 set_bit(STATUS_RFKILL, &trans->status); 1000 set_bit(STATUS_RFKILL, &trans->status);
816 else 1001 else
817 clear_bit(STATUS_RFKILL, &trans->status); 1002 clear_bit(STATUS_RFKILL, &trans->status);
818 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 1003 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
819 1004
820 return 0; 1005 return 0;
821} 1006}
@@ -1158,6 +1343,7 @@ static const char *get_csr_string(int cmd)
1158 IWL_CMD(CSR_GIO_CHICKEN_BITS); 1343 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1159 IWL_CMD(CSR_ANA_PLL_CFG); 1344 IWL_CMD(CSR_ANA_PLL_CFG);
1160 IWL_CMD(CSR_HW_REV_WA_REG); 1345 IWL_CMD(CSR_HW_REV_WA_REG);
1346 IWL_CMD(CSR_MONITOR_STATUS_REG);
1161 IWL_CMD(CSR_DBG_HPET_MEM_REG); 1347 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1162 default: 1348 default:
1163 return "UNKNOWN"; 1349 return "UNKNOWN";
@@ -1190,6 +1376,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
1190 CSR_DRAM_INT_TBL_REG, 1376 CSR_DRAM_INT_TBL_REG,
1191 CSR_GIO_CHICKEN_BITS, 1377 CSR_GIO_CHICKEN_BITS,
1192 CSR_ANA_PLL_CFG, 1378 CSR_ANA_PLL_CFG,
1379 CSR_MONITOR_STATUS_REG,
1193 CSR_HW_REV_WA_REG, 1380 CSR_HW_REV_WA_REG,
1194 CSR_DBG_HPET_MEM_REG 1381 CSR_DBG_HPET_MEM_REG
1195 }; 1382 };
@@ -1407,16 +1594,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1407{ 1594{
1408 struct iwl_trans *trans = file->private_data; 1595 struct iwl_trans *trans = file->private_data;
1409 char *buf = NULL; 1596 char *buf = NULL;
1410 int pos = 0; 1597 ssize_t ret;
1411 ssize_t ret = -EFAULT;
1412
1413 ret = pos = iwl_dump_fh(trans, &buf);
1414 if (buf) {
1415 ret = simple_read_from_buffer(user_buf,
1416 count, ppos, buf, pos);
1417 kfree(buf);
1418 }
1419 1598
1599 ret = iwl_dump_fh(trans, &buf);
1600 if (ret < 0)
1601 return ret;
1602 if (!buf)
1603 return -EINVAL;
1604 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
1605 kfree(buf);
1420 return ret; 1606 return ret;
1421} 1607}
1422 1608
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3d549008b3e2..3b0c72c10054 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
208 le32_to_cpu(txq->scratchbufs[i].scratch)); 208 le32_to_cpu(txq->scratchbufs[i].scratch));
209 209
210 iwl_trans_fw_error(trans); 210 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
211} 211}
212 212
213/* 213/*
@@ -296,43 +296,38 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
296 if (txq->need_update == 0) 296 if (txq->need_update == 0)
297 return; 297 return;
298 298
299 if (trans->cfg->base_params->shadow_reg_enable || 299 /*
300 txq_id == trans_pcie->cmd_queue) { 300 * explicitly wake up the NIC if:
301 /* shadow register enabled */ 301 * 1. shadow registers aren't enabled
302 iwl_write32(trans, HBUS_TARG_WRPTR, 302 * 2. NIC is woken up for CMD regardless of shadow outside this function
303 txq->q.write_ptr | (txq_id << 8)); 303 * 3. there is a chance that the NIC is asleep
304 } else { 304 */
305 /* if we're trying to save power */ 305 if (!trans->cfg->base_params->shadow_reg_enable &&
306 if (test_bit(STATUS_TPOWER_PMI, &trans->status)) { 306 txq_id != trans_pcie->cmd_queue &&
307 /* wake up nic if it's powered down ... 307 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
308 * uCode will wake up, and interrupt us again, so next
309 * time we'll skip this part. */
310 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
311
312 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
313 IWL_DEBUG_INFO(trans,
314 "Tx queue %d requesting wakeup,"
315 " GP1 = 0x%x\n", txq_id, reg);
316 iwl_set_bit(trans, CSR_GP_CNTRL,
317 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
318 return;
319 }
320
321 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
322 txq->q.write_ptr);
323
324 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
325 txq->q.write_ptr | (txq_id << 8));
326
327 /* 308 /*
328 * else not in power-save mode, 309 * wake up nic if it's powered down ...
329 * uCode will never sleep when we're 310 * uCode will wake up, and interrupt us again, so next
330 * trying to tx (during RFKILL, we're not trying to tx). 311 * time we'll skip this part.
331 */ 312 */
332 } else 313 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
333 iwl_write32(trans, HBUS_TARG_WRPTR, 314
334 txq->q.write_ptr | (txq_id << 8)); 315 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
316 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
317 txq_id, reg);
318 iwl_set_bit(trans, CSR_GP_CNTRL,
319 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
320 return;
321 }
335 } 322 }
323
324 /*
325 * if not in power-save mode, uCode will never sleep when we're
326 * trying to tx (during RFKILL, we're not trying to tx).
327 */
328 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
329 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
330
336 txq->need_update = 0; 331 txq->need_update = 0;
337} 332}
338 333
@@ -705,8 +700,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
705 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 700 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
706 701
707 /* Enable L1-Active */ 702 /* Enable L1-Active */
708 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 703 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
709 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 704 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
705 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
710} 706}
711 707
712void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 708void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
@@ -1028,7 +1024,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1028 if (nfreed++ > 0) { 1024 if (nfreed++ > 0) {
1029 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1025 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1030 idx, q->write_ptr, q->read_ptr); 1026 idx, q->write_ptr, q->read_ptr);
1031 iwl_trans_fw_error(trans); 1027 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
1032 } 1028 }
1033 } 1029 }
1034 1030
@@ -1587,6 +1583,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1587 get_cmd_string(trans_pcie, cmd->id)); 1583 get_cmd_string(trans_pcie, cmd->id));
1588 ret = -ETIMEDOUT; 1584 ret = -ETIMEDOUT;
1589 1585
1586 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
1590 iwl_trans_fw_error(trans); 1587 iwl_trans_fw_error(trans);
1591 1588
1592 goto cancel; 1589 goto cancel;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index cb6d189bc3e6..54e344aed6e0 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1766,7 +1766,8 @@ static void lbs_join_post(struct lbs_private *priv,
1766 memcpy(priv->wdev->ssid, params->ssid, params->ssid_len); 1766 memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
1767 priv->wdev->ssid_len = params->ssid_len; 1767 priv->wdev->ssid_len = params->ssid_len;
1768 1768
1769 cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL); 1769 cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
1770 GFP_KERNEL);
1770 1771
1771 /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */ 1772 /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
1772 priv->connect_status = LBS_CONNECTED; 1773 priv->connect_status = LBS_CONNECTED;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 58c6ee5de98f..33ceda296c9c 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -498,7 +498,7 @@ static int if_sdio_prog_helper(struct if_sdio_card *card,
498 */ 498 */
499 mdelay(2); 499 mdelay(2);
500 500
501 chunk_size = min(size, (size_t)60); 501 chunk_size = min_t(size_t, size, 60);
502 502
503 *((__le32*)chunk_buffer) = cpu_to_le32(chunk_size); 503 *((__le32*)chunk_buffer) = cpu_to_le32(chunk_size);
504 memcpy(chunk_buffer + 4, firmware, chunk_size); 504 memcpy(chunk_buffer + 4, firmware, chunk_size);
@@ -639,7 +639,7 @@ static int if_sdio_prog_real(struct if_sdio_card *card,
639 req_size = size; 639 req_size = size;
640 640
641 while (req_size) { 641 while (req_size) {
642 chunk_size = min(req_size, (size_t)512); 642 chunk_size = min_t(size_t, req_size, 512);
643 643
644 memcpy(chunk_buffer, firmware, chunk_size); 644 memcpy(chunk_buffer, firmware, chunk_size);
645/* 645/*
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 69d4c3179d04..9d7a52f5a410 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -57,6 +57,10 @@ static bool rctbl = false;
57module_param(rctbl, bool, 0444); 57module_param(rctbl, bool, 0444);
58MODULE_PARM_DESC(rctbl, "Handle rate control table"); 58MODULE_PARM_DESC(rctbl, "Handle rate control table");
59 59
60static bool support_p2p_device = true;
61module_param(support_p2p_device, bool, 0444);
62MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
63
60/** 64/**
61 * enum hwsim_regtest - the type of regulatory tests we offer 65 * enum hwsim_regtest - the type of regulatory tests we offer
62 * 66 *
@@ -335,7 +339,8 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
335#endif 339#endif
336 BIT(NL80211_IFTYPE_AP) | 340 BIT(NL80211_IFTYPE_AP) |
337 BIT(NL80211_IFTYPE_P2P_GO) }, 341 BIT(NL80211_IFTYPE_P2P_GO) },
338 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, 342 /* must be last, see hwsim_if_comb */
343 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
339}; 344};
340 345
341static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = { 346static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
@@ -345,6 +350,27 @@ static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
345static const struct ieee80211_iface_combination hwsim_if_comb[] = { 350static const struct ieee80211_iface_combination hwsim_if_comb[] = {
346 { 351 {
347 .limits = hwsim_if_limits, 352 .limits = hwsim_if_limits,
353 /* remove the last entry which is P2P_DEVICE */
354 .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
355 .max_interfaces = 2048,
356 .num_different_channels = 1,
357 },
358 {
359 .limits = hwsim_if_dfs_limits,
360 .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
361 .max_interfaces = 8,
362 .num_different_channels = 1,
363 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
364 BIT(NL80211_CHAN_WIDTH_20) |
365 BIT(NL80211_CHAN_WIDTH_40) |
366 BIT(NL80211_CHAN_WIDTH_80) |
367 BIT(NL80211_CHAN_WIDTH_160),
368 }
369};
370
371static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
372 {
373 .limits = hwsim_if_limits,
348 .n_limits = ARRAY_SIZE(hwsim_if_limits), 374 .n_limits = ARRAY_SIZE(hwsim_if_limits),
349 .max_interfaces = 2048, 375 .max_interfaces = 2048,
350 .num_different_channels = 1, 376 .num_different_channels = 1,
@@ -385,6 +411,7 @@ struct mac80211_hwsim_data {
385 411
386 struct mac_address addresses[2]; 412 struct mac_address addresses[2];
387 int channels, idx; 413 int channels, idx;
414 bool use_chanctx;
388 415
389 struct ieee80211_channel *tmp_chan; 416 struct ieee80211_channel *tmp_chan;
390 struct delayed_work roc_done; 417 struct delayed_work roc_done;
@@ -451,7 +478,7 @@ static struct genl_family hwsim_genl_family = {
451 478
452/* MAC80211_HWSIM netlink policy */ 479/* MAC80211_HWSIM netlink policy */
453 480
454static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { 481static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
455 [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, 482 [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
456 [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, 483 [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
457 [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY, 484 [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
@@ -468,6 +495,7 @@ static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
468 [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 }, 495 [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
469 [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, 496 [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
470 [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, 497 [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
498 [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
471}; 499};
472 500
473static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 501static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1035,32 +1063,6 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
1035 ack = true; 1063 ack = true;
1036 1064
1037 rx_status.mactime = now + data2->tsf_offset; 1065 rx_status.mactime = now + data2->tsf_offset;
1038#if 0
1039 /*
1040 * Don't enable this code by default as the OUI 00:00:00
1041 * is registered to Xerox so we shouldn't use it here, it
1042 * might find its way into pcap files.
1043 * Note that this code requires the headroom in the SKB
1044 * that was allocated earlier.
1045 */
1046 rx_status.vendor_radiotap_oui[0] = 0x00;
1047 rx_status.vendor_radiotap_oui[1] = 0x00;
1048 rx_status.vendor_radiotap_oui[2] = 0x00;
1049 rx_status.vendor_radiotap_subns = 127;
1050 /*
1051 * Radiotap vendor namespaces can (and should) also be
1052 * split into fields by using the standard radiotap
1053 * presence bitmap mechanism. Use just BIT(0) here for
1054 * the presence bitmap.
1055 */
1056 rx_status.vendor_radiotap_bitmap = BIT(0);
1057 /* We have 8 bytes of (dummy) data */
1058 rx_status.vendor_radiotap_len = 8;
1059 /* For testing, also require it to be aligned */
1060 rx_status.vendor_radiotap_align = 8;
1061 /* push the data */
1062 memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
1063#endif
1064 1066
1065 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 1067 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
1066 ieee80211_rx_irqsafe(data2->hw, nskb); 1068 ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -1087,7 +1089,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
1087 return; 1089 return;
1088 } 1090 }
1089 1091
1090 if (data->channels == 1) { 1092 if (!data->use_chanctx) {
1091 channel = data->channel; 1093 channel = data->channel;
1092 } else if (txi->hw_queue == 4) { 1094 } else if (txi->hw_queue == 4) {
1093 channel = data->tmp_chan; 1095 channel = data->tmp_chan;
@@ -1275,6 +1277,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
1275 1277
1276 mac80211_hwsim_tx_frame(hw, skb, 1278 mac80211_hwsim_tx_frame(hw, skb,
1277 rcu_dereference(vif->chanctx_conf)->def.chan); 1279 rcu_dereference(vif->chanctx_conf)->def.chan);
1280
1281 if (vif->csa_active && ieee80211_csa_is_complete(vif))
1282 ieee80211_csa_finish(vif);
1278} 1283}
1279 1284
1280static enum hrtimer_restart 1285static enum hrtimer_restart
@@ -1350,7 +1355,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
1350 1355
1351 data->channel = conf->chandef.chan; 1356 data->channel = conf->chandef.chan;
1352 1357
1353 WARN_ON(data->channel && data->channels > 1); 1358 WARN_ON(data->channel && data->use_chanctx);
1354 1359
1355 data->power_level = conf->power_level; 1360 data->power_level = conf->power_level;
1356 if (!data->started || !data->beacon_int) 1361 if (!data->started || !data->beacon_int)
@@ -1936,7 +1941,8 @@ static struct ieee80211_ops mac80211_hwsim_mchan_ops;
1936 1941
1937static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, 1942static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
1938 const struct ieee80211_regdomain *regd, 1943 const struct ieee80211_regdomain *regd,
1939 bool reg_strict) 1944 bool reg_strict, bool p2p_device,
1945 bool use_chanctx)
1940{ 1946{
1941 int err; 1947 int err;
1942 u8 addr[ETH_ALEN]; 1948 u8 addr[ETH_ALEN];
@@ -1946,11 +1952,14 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
1946 const struct ieee80211_ops *ops = &mac80211_hwsim_ops; 1952 const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
1947 int idx; 1953 int idx;
1948 1954
1955 if (WARN_ON(channels > 1 && !use_chanctx))
1956 return -EINVAL;
1957
1949 spin_lock_bh(&hwsim_radio_lock); 1958 spin_lock_bh(&hwsim_radio_lock);
1950 idx = hwsim_radio_idx++; 1959 idx = hwsim_radio_idx++;
1951 spin_unlock_bh(&hwsim_radio_lock); 1960 spin_unlock_bh(&hwsim_radio_lock);
1952 1961
1953 if (channels > 1) 1962 if (use_chanctx)
1954 ops = &mac80211_hwsim_mchan_ops; 1963 ops = &mac80211_hwsim_mchan_ops;
1955 hw = ieee80211_alloc_hw(sizeof(*data), ops); 1964 hw = ieee80211_alloc_hw(sizeof(*data), ops);
1956 if (!hw) { 1965 if (!hw) {
@@ -1991,17 +2000,25 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
1991 hw->wiphy->addresses = data->addresses; 2000 hw->wiphy->addresses = data->addresses;
1992 2001
1993 data->channels = channels; 2002 data->channels = channels;
2003 data->use_chanctx = use_chanctx;
1994 data->idx = idx; 2004 data->idx = idx;
1995 2005
1996 if (data->channels > 1) { 2006 if (data->use_chanctx) {
1997 hw->wiphy->max_scan_ssids = 255; 2007 hw->wiphy->max_scan_ssids = 255;
1998 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; 2008 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
1999 hw->wiphy->max_remain_on_channel_duration = 1000; 2009 hw->wiphy->max_remain_on_channel_duration = 1000;
2000 /* For channels > 1 DFS is not allowed */ 2010 /* For channels > 1 DFS is not allowed */
2001 hw->wiphy->n_iface_combinations = 1; 2011 hw->wiphy->n_iface_combinations = 1;
2002 hw->wiphy->iface_combinations = &data->if_combination; 2012 hw->wiphy->iface_combinations = &data->if_combination;
2003 data->if_combination = hwsim_if_comb[0]; 2013 if (p2p_device)
2014 data->if_combination = hwsim_if_comb_p2p_dev[0];
2015 else
2016 data->if_combination = hwsim_if_comb[0];
2004 data->if_combination.num_different_channels = data->channels; 2017 data->if_combination.num_different_channels = data->channels;
2018 } else if (p2p_device) {
2019 hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
2020 hw->wiphy->n_iface_combinations =
2021 ARRAY_SIZE(hwsim_if_comb_p2p_dev);
2005 } else { 2022 } else {
2006 hw->wiphy->iface_combinations = hwsim_if_comb; 2023 hw->wiphy->iface_combinations = hwsim_if_comb;
2007 hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb); 2024 hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
@@ -2017,8 +2034,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2017 BIT(NL80211_IFTYPE_P2P_CLIENT) | 2034 BIT(NL80211_IFTYPE_P2P_CLIENT) |
2018 BIT(NL80211_IFTYPE_P2P_GO) | 2035 BIT(NL80211_IFTYPE_P2P_GO) |
2019 BIT(NL80211_IFTYPE_ADHOC) | 2036 BIT(NL80211_IFTYPE_ADHOC) |
2020 BIT(NL80211_IFTYPE_MESH_POINT) | 2037 BIT(NL80211_IFTYPE_MESH_POINT);
2021 BIT(NL80211_IFTYPE_P2P_DEVICE); 2038
2039 if (p2p_device)
2040 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
2022 2041
2023 hw->flags = IEEE80211_HW_MFP_CAPABLE | 2042 hw->flags = IEEE80211_HW_MFP_CAPABLE |
2024 IEEE80211_HW_SIGNAL_DBM | 2043 IEEE80211_HW_SIGNAL_DBM |
@@ -2027,13 +2046,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2027 IEEE80211_HW_AMPDU_AGGREGATION | 2046 IEEE80211_HW_AMPDU_AGGREGATION |
2028 IEEE80211_HW_WANT_MONITOR_VIF | 2047 IEEE80211_HW_WANT_MONITOR_VIF |
2029 IEEE80211_HW_QUEUE_CONTROL | 2048 IEEE80211_HW_QUEUE_CONTROL |
2030 IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 2049 IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
2050 IEEE80211_HW_CHANCTX_STA_CSA;
2031 if (rctbl) 2051 if (rctbl)
2032 hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE; 2052 hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
2033 2053
2034 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | 2054 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
2035 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 2055 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
2036 WIPHY_FLAG_AP_UAPSD; 2056 WIPHY_FLAG_AP_UAPSD |
2057 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
2037 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 2058 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
2038 2059
2039 /* ask mac80211 to reserve space for magic */ 2060 /* ask mac80211 to reserve space for magic */
@@ -2141,7 +2162,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2141 debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps); 2162 debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
2142 debugfs_create_file("group", 0666, data->debugfs, data, 2163 debugfs_create_file("group", 0666, data->debugfs, data,
2143 &hwsim_fops_group); 2164 &hwsim_fops_group);
2144 if (data->channels == 1) 2165 if (!data->use_chanctx)
2145 debugfs_create_file("dfs_simulate_radar", 0222, 2166 debugfs_create_file("dfs_simulate_radar", 0222,
2146 data->debugfs, 2167 data->debugfs,
2147 data, &hwsim_simulate_radar); 2168 data, &hwsim_simulate_radar);
@@ -2407,10 +2428,17 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
2407 const char *alpha2 = NULL; 2428 const char *alpha2 = NULL;
2408 const struct ieee80211_regdomain *regd = NULL; 2429 const struct ieee80211_regdomain *regd = NULL;
2409 bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; 2430 bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
2431 bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
2432 bool use_chanctx;
2410 2433
2411 if (info->attrs[HWSIM_ATTR_CHANNELS]) 2434 if (info->attrs[HWSIM_ATTR_CHANNELS])
2412 chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); 2435 chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
2413 2436
2437 if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
2438 use_chanctx = true;
2439 else
2440 use_chanctx = (chans > 1);
2441
2414 if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) 2442 if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
2415 alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); 2443 alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
2416 2444
@@ -2422,7 +2450,8 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
2422 regd = hwsim_world_regdom_custom[idx]; 2450 regd = hwsim_world_regdom_custom[idx];
2423 } 2451 }
2424 2452
2425 return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict); 2453 return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
2454 p2p_device, use_chanctx);
2426} 2455}
2427 2456
2428static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info) 2457static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -2640,7 +2669,9 @@ static int __init init_mac80211_hwsim(void)
2640 } 2669 }
2641 2670
2642 err = mac80211_hwsim_create_radio(channels, reg_alpha2, 2671 err = mac80211_hwsim_create_radio(channels, reg_alpha2,
2643 regd, reg_strict); 2672 regd, reg_strict,
2673 support_p2p_device,
2674 channels > 1);
2644 if (err < 0) 2675 if (err < 0)
2645 goto out_free_radios; 2676 goto out_free_radios;
2646 } 2677 }
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 2747cce5a269..c9d0315575ba 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -107,6 +107,10 @@ enum {
107 * (nla string, length 2) 107 * (nla string, length 2)
108 * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute) 108 * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
109 * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute) 109 * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
110 * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag)
111 * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO
112 * command to force use of channel contexts even when only a
113 * single channel is supported
110 * @__HWSIM_ATTR_MAX: enum limit 114 * @__HWSIM_ATTR_MAX: enum limit
111 */ 115 */
112 116
@@ -126,6 +130,8 @@ enum {
126 HWSIM_ATTR_REG_HINT_ALPHA2, 130 HWSIM_ATTR_REG_HINT_ALPHA2,
127 HWSIM_ATTR_REG_CUSTOM_REG, 131 HWSIM_ATTR_REG_CUSTOM_REG,
128 HWSIM_ATTR_REG_STRICT_REG, 132 HWSIM_ATTR_REG_STRICT_REG,
133 HWSIM_ATTR_SUPPORT_P2P_DEVICE,
134 HWSIM_ATTR_USE_CHANCTX,
129 __HWSIM_ATTR_MAX, 135 __HWSIM_ATTR_MAX,
130}; 136};
131#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) 137#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5d9a8084665d..c92f27aa71ed 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -23,6 +23,31 @@
23#include "main.h" 23#include "main.h"
24#include "11ac.h" 24#include "11ac.h"
25 25
26/* Tables of the MCS map to the highest data rate (in Mbps) supported
27 * for long GI.
28 */
29static const u16 max_rate_lgi_80MHZ[8][3] = {
30 {0x124, 0x15F, 0x186}, /* NSS = 1 */
31 {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
32 {0x36D, 0x41D, 0x492}, /* NSS = 3 */
33 {0x492, 0x57C, 0x618}, /* NSS = 4 */
34 {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
35 {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
36 {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
37 {0x924, 0xAF8, 0xC30} /* NSS = 8 */
38};
39
40static const u16 max_rate_lgi_160MHZ[8][3] = {
41 {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
42 {0x492, 0x57C, 0x618}, /* NSS = 2 */
43 {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
44 {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
45 {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
46 {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
47 {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
48 {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
49};
50
26/* This function converts the 2-bit MCS map to the highest long GI 51/* This function converts the 2-bit MCS map to the highest long GI
27 * VHT data rate. 52 * VHT data rate.
28 */ 53 */
@@ -30,33 +55,10 @@ static u16
30mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv, 55mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
31 u8 bands, u16 mcs_map) 56 u8 bands, u16 mcs_map)
32{ 57{
33 u8 i, nss, max_mcs; 58 u8 i, nss, mcs;
34 u16 max_rate = 0; 59 u16 max_rate = 0;
35 u32 usr_vht_cap_info = 0; 60 u32 usr_vht_cap_info = 0;
36 struct mwifiex_adapter *adapter = priv->adapter; 61 struct mwifiex_adapter *adapter = priv->adapter;
37 /* tables of the MCS map to the highest data rate (in Mbps)
38 * supported for long GI
39 */
40 u16 max_rate_lgi_80MHZ[8][3] = {
41 {0x124, 0x15F, 0x186}, /* NSS = 1 */
42 {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
43 {0x36D, 0x41D, 0x492}, /* NSS = 3 */
44 {0x492, 0x57C, 0x618}, /* NSS = 4 */
45 {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
46 {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
47 {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
48 {0x924, 0xAF8, 0xC30} /* NSS = 8 */
49 };
50 u16 max_rate_lgi_160MHZ[8][3] = {
51 {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
52 {0x492, 0x57C, 0x618}, /* NSS = 2 */
53 {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
54 {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
55 {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
56 {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
57 {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
58 {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
59 };
60 62
61 if (bands & BAND_AAC) 63 if (bands & BAND_AAC)
62 usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a; 64 usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
@@ -64,29 +66,29 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
64 usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; 66 usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
65 67
66 /* find the max NSS supported */ 68 /* find the max NSS supported */
67 nss = 0; 69 nss = 1;
68 for (i = 0; i < 8; i++) { 70 for (i = 1; i <= 8; i++) {
69 max_mcs = (mcs_map >> (2 * i)) & 0x3; 71 mcs = GET_VHTNSSMCS(mcs_map, i);
70 if (max_mcs < 3) 72 if (mcs < IEEE80211_VHT_MCS_NOT_SUPPORTED)
71 nss = i; 73 nss = i;
72 } 74 }
73 max_mcs = (mcs_map >> (2 * nss)) & 0x3; 75 mcs = GET_VHTNSSMCS(mcs_map, nss);
74 76
75 /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */ 77 /* if mcs is 3, nss must be 1 (NSS = 1). Default mcs to MCS 0~9 */
76 if (max_mcs >= 3) 78 if (mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
77 max_mcs = 2; 79 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
78 80
79 if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) { 81 if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
80 /* support 160 MHz */ 82 /* support 160 MHz */
81 max_rate = max_rate_lgi_160MHZ[nss][max_mcs]; 83 max_rate = max_rate_lgi_160MHZ[nss - 1][mcs];
82 if (!max_rate) 84 if (!max_rate)
83 /* MCS9 is not supported in NSS6 */ 85 /* MCS9 is not supported in NSS6 */
84 max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1]; 86 max_rate = max_rate_lgi_160MHZ[nss - 1][mcs - 1];
85 } else { 87 } else {
86 max_rate = max_rate_lgi_80MHZ[nss][max_mcs]; 88 max_rate = max_rate_lgi_80MHZ[nss - 1][mcs];
87 if (!max_rate) 89 if (!max_rate)
88 /* MCS9 is not supported in NSS3 */ 90 /* MCS9 is not supported in NSS3 */
89 max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1]; 91 max_rate = max_rate_lgi_80MHZ[nss - 1][mcs - 1];
90 } 92 }
91 93
92 return max_rate; 94 return max_rate;
@@ -94,21 +96,20 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
94 96
95static void 97static void
96mwifiex_fill_vht_cap_info(struct mwifiex_private *priv, 98mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
97 struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands) 99 struct ieee80211_vht_cap *vht_cap, u8 bands)
98{ 100{
99 struct mwifiex_adapter *adapter = priv->adapter; 101 struct mwifiex_adapter *adapter = priv->adapter;
100 102
101 if (bands & BAND_A) 103 if (bands & BAND_A)
102 vht_cap->vht_cap.vht_cap_info = 104 vht_cap->vht_cap_info =
103 cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a); 105 cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
104 else 106 else
105 vht_cap->vht_cap.vht_cap_info = 107 vht_cap->vht_cap_info =
106 cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg); 108 cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
107} 109}
108 110
109static void 111void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
110mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv, 112 struct ieee80211_vht_cap *vht_cap, u8 bands)
111 struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
112{ 113{
113 struct mwifiex_adapter *adapter = priv->adapter; 114 struct mwifiex_adapter *adapter = priv->adapter;
114 u16 mcs_map_user, mcs_map_resp, mcs_map_result; 115 u16 mcs_map_user, mcs_map_resp, mcs_map_result;
@@ -119,46 +120,48 @@ mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
119 120
120 /* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */ 121 /* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
121 mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support); 122 mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
122 mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map); 123 mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
123 mcs_map_result = 0; 124 mcs_map_result = 0;
124 125
125 for (nss = 1; nss <= 8; nss++) { 126 for (nss = 1; nss <= 8; nss++) {
126 mcs_user = GET_VHTNSSMCS(mcs_map_user, nss); 127 mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
127 mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss); 128 mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
128 129
129 if ((mcs_user == NO_NSS_SUPPORT) || 130 if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
130 (mcs_resp == NO_NSS_SUPPORT)) 131 (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
131 SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT); 132 SET_VHTNSSMCS(mcs_map_result, nss,
133 IEEE80211_VHT_MCS_NOT_SUPPORTED);
132 else 134 else
133 SET_VHTNSSMCS(mcs_map_result, nss, 135 SET_VHTNSSMCS(mcs_map_result, nss,
134 min(mcs_user, mcs_resp)); 136 min(mcs_user, mcs_resp));
135 } 137 }
136 138
137 vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result); 139 vht_cap->supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
138 140
139 tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result); 141 tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
140 vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp); 142 vht_cap->supp_mcs.rx_highest = cpu_to_le16(tmp);
141 143
142 /* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */ 144 /* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
143 mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support); 145 mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
144 mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map); 146 mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
145 mcs_map_result = 0; 147 mcs_map_result = 0;
146 148
147 for (nss = 1; nss <= 8; nss++) { 149 for (nss = 1; nss <= 8; nss++) {
148 mcs_user = GET_VHTNSSMCS(mcs_map_user, nss); 150 mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
149 mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss); 151 mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
150 if ((mcs_user == NO_NSS_SUPPORT) || 152 if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
151 (mcs_resp == NO_NSS_SUPPORT)) 153 (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
152 SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT); 154 SET_VHTNSSMCS(mcs_map_result, nss,
155 IEEE80211_VHT_MCS_NOT_SUPPORTED);
153 else 156 else
154 SET_VHTNSSMCS(mcs_map_result, nss, 157 SET_VHTNSSMCS(mcs_map_result, nss,
155 min(mcs_user, mcs_resp)); 158 min(mcs_user, mcs_resp));
156 } 159 }
157 160
158 vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result); 161 vht_cap->supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
159 162
160 tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result); 163 tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
161 vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp); 164 vht_cap->supp_mcs.tx_highest = cpu_to_le16(tmp);
162 165
163 return; 166 return;
164} 167}
@@ -192,7 +195,8 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
192 (u8 *)bss_desc->bcn_vht_cap, 195 (u8 *)bss_desc->bcn_vht_cap,
193 le16_to_cpu(vht_cap->header.len)); 196 le16_to_cpu(vht_cap->header.len));
194 197
195 mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); 198 mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap,
199 bss_desc->bss_band);
196 *buffer += sizeof(*vht_cap); 200 *buffer += sizeof(*vht_cap);
197 ret_len += sizeof(*vht_cap); 201 ret_len += sizeof(*vht_cap);
198 } 202 }
@@ -299,3 +303,81 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
299 303
300 return; 304 return;
301} 305}
306
307bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv)
308{
309 struct mwifiex_bssdescriptor *bss_desc;
310 struct ieee80211_vht_operation *vht_oper;
311
312 bss_desc = &priv->curr_bss_params.bss_descriptor;
313 vht_oper = bss_desc->bcn_vht_oper;
314
315 if (!bss_desc->bcn_vht_cap || !vht_oper)
316 return false;
317
318 if (vht_oper->chan_width == IEEE80211_VHT_CHANWIDTH_USE_HT)
319 return false;
320
321 return true;
322}
323
324u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
325 u32 pri_chan, u8 chan_bw)
326{
327 u8 center_freq_idx = 0;
328
329 if (band & BAND_AAC) {
330 switch (pri_chan) {
331 case 36:
332 case 40:
333 case 44:
334 case 48:
335 if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
336 center_freq_idx = 42;
337 break;
338 case 52:
339 case 56:
340 case 60:
341 case 64:
342 if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
343 center_freq_idx = 58;
344 else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
345 center_freq_idx = 50;
346 break;
347 case 100:
348 case 104:
349 case 108:
350 case 112:
351 if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
352 center_freq_idx = 106;
353 break;
354 case 116:
355 case 120:
356 case 124:
357 case 128:
358 if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
359 center_freq_idx = 122;
360 else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
361 center_freq_idx = 114;
362 break;
363 case 132:
364 case 136:
365 case 140:
366 case 144:
367 if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
368 center_freq_idx = 138;
369 break;
370 case 149:
371 case 153:
372 case 157:
373 case 161:
374 if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
375 center_freq_idx = 155;
376 break;
377 default:
378 center_freq_idx = 42;
379 }
380 }
381
382 return center_freq_idx;
383}
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
index 7c2c69b5b3eb..0b02cb6cfcb4 100644
--- a/drivers/net/wireless/mwifiex/11ac.h
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -40,4 +40,6 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
40int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv, 40int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
41 struct host_cmd_ds_command *cmd, u16 cmd_action, 41 struct host_cmd_ds_command *cmd, u16 cmd_action,
42 struct mwifiex_11ac_vht_cfg *cfg); 42 struct mwifiex_11ac_vht_cfg *cfg);
43void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
44 struct ieee80211_vht_cap *vht_cap, u8 bands);
43#endif /* _MWIFIEX_11AC_H_ */ 45#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 8d683070bdb3..e76b0db4e3e6 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -73,8 +73,8 @@ static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
73{ 73{
74 u32 enable = flag; 74 u32 enable = flag;
75 75
76 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 76 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
77 HostCmd_ACT_GEN_SET, DOT11H_I, &enable); 77 HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true);
78} 78}
79 79
80/* This functions processes TLV buffer for a pending BSS Join command. 80/* This functions processes TLV buffer for a pending BSS Join command.
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 7db1a89fdd95..d14ead8beca8 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -34,22 +34,26 @@
34 * 34 *
35 * RD responder bit to set to clear in the extended capability header. 35 * RD responder bit to set to clear in the extended capability header.
36 */ 36 */
37void 37int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
38mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type, 38 struct ieee80211_ht_cap *ht_cap)
39 struct mwifiex_ie_types_htcap *ht_cap)
40{ 39{
41 uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info); 40 uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
42 struct ieee80211_supported_band *sband = 41 struct ieee80211_supported_band *sband =
43 priv->wdev->wiphy->bands[radio_type]; 42 priv->wdev->wiphy->bands[radio_type];
44 43
45 ht_cap->ht_cap.ampdu_params_info = 44 if (WARN_ON_ONCE(!sband)) {
45 dev_err(priv->adapter->dev, "Invalid radio type!\n");
46 return -EINVAL;
47 }
48
49 ht_cap->ampdu_params_info =
46 (sband->ht_cap.ampdu_factor & 50 (sband->ht_cap.ampdu_factor &
47 IEEE80211_HT_AMPDU_PARM_FACTOR) | 51 IEEE80211_HT_AMPDU_PARM_FACTOR) |
48 ((sband->ht_cap.ampdu_density << 52 ((sband->ht_cap.ampdu_density <<
49 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) & 53 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
50 IEEE80211_HT_AMPDU_PARM_DENSITY); 54 IEEE80211_HT_AMPDU_PARM_DENSITY);
51 55
52 memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs, 56 memcpy((u8 *)&ht_cap->mcs, &sband->ht_cap.mcs,
53 sizeof(sband->ht_cap.mcs)); 57 sizeof(sband->ht_cap.mcs));
54 58
55 if (priv->bss_mode == NL80211_IFTYPE_STATION || 59 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
@@ -57,13 +61,18 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
57 (priv->adapter->sec_chan_offset != 61 (priv->adapter->sec_chan_offset !=
58 IEEE80211_HT_PARAM_CHA_SEC_NONE))) 62 IEEE80211_HT_PARAM_CHA_SEC_NONE)))
59 /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */ 63 /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
60 SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask); 64 SETHT_MCS32(ht_cap->mcs.rx_mask);
61 65
62 /* Clear RD responder bit */ 66 /* Clear RD responder bit */
63 ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER; 67 ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
64 68
65 ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap); 69 ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
66 ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap); 70 ht_cap->extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
71
72 if (ISSUPP_BEAMFORMING(priv->adapter->hw_dot_11n_dev_cap))
73 ht_cap->tx_BF_cap_info = cpu_to_le32(MWIFIEX_DEF_11N_TX_BF_CAP);
74
75 return 0;
67} 76}
68 77
69/* 78/*
@@ -150,28 +159,34 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
150 int tid; 159 int tid;
151 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp; 160 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
152 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 161 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
162 u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
153 163
154 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn)) 164 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
155 & SSN_MASK); 165 & SSN_MASK);
156 166
157 tid = (le16_to_cpu(add_ba_rsp->block_ack_param_set) 167 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
158 & IEEE80211_ADDBA_PARAM_TID_MASK) 168 >> BLOCKACKPARAM_TID_POS;
159 >> BLOCKACKPARAM_TID_POS; 169 if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
160 if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
161 tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid,
162 add_ba_rsp->peer_mac_addr);
163 if (tx_ba_tbl) {
164 dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
165 tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
166 } else {
167 dev_err(priv->adapter->dev, "BA stream not created\n");
168 }
169 } else {
170 mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr, 170 mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
171 TYPE_DELBA_SENT, true); 171 TYPE_DELBA_SENT, true);
172 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT) 172 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
173 priv->aggr_prio_tbl[tid].ampdu_ap = 173 priv->aggr_prio_tbl[tid].ampdu_ap =
174 BA_STREAM_NOT_ALLOWED; 174 BA_STREAM_NOT_ALLOWED;
175 return 0;
176 }
177
178 tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
179 if (tx_ba_tbl) {
180 dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
181 tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
182 if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
183 priv->add_ba_param.tx_amsdu &&
184 (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
185 tx_ba_tbl->amsdu = true;
186 else
187 tx_ba_tbl->amsdu = false;
188 } else {
189 dev_err(priv->adapter->dev, "BA stream not created\n");
175 } 190 }
176 191
177 return 0; 192 return 0;
@@ -311,7 +326,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
311 (u8 *)bss_desc->bcn_ht_cap, 326 (u8 *)bss_desc->bcn_ht_cap,
312 le16_to_cpu(ht_cap->header.len)); 327 le16_to_cpu(ht_cap->header.len));
313 328
314 mwifiex_fill_cap_info(priv, radio_type, ht_cap); 329 mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
315 330
316 *buffer += sizeof(struct mwifiex_ie_types_htcap); 331 *buffer += sizeof(struct mwifiex_ie_types_htcap);
317 ret_len += sizeof(struct mwifiex_ie_types_htcap); 332 ret_len += sizeof(struct mwifiex_ie_types_htcap);
@@ -527,16 +542,39 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
527int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) 542int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
528{ 543{
529 struct host_cmd_ds_11n_addba_req add_ba_req; 544 struct host_cmd_ds_11n_addba_req add_ba_req;
545 struct mwifiex_sta_node *sta_ptr;
546 u32 tx_win_size = priv->add_ba_param.tx_win_size;
530 static u8 dialog_tok; 547 static u8 dialog_tok;
531 int ret; 548 int ret;
549 u16 block_ack_param_set;
532 550
533 dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid); 551 dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
534 552
535 add_ba_req.block_ack_param_set = cpu_to_le16( 553 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
536 (u16) ((tid << BLOCKACKPARAM_TID_POS) | 554 ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
537 (priv->add_ba_param. 555 priv->adapter->is_hw_11ac_capable &&
538 tx_win_size << BLOCKACKPARAM_WINSIZE_POS) | 556 memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
539 IMMEDIATE_BLOCK_ACK)); 557 sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
558 if (!sta_ptr) {
559 dev_warn(priv->adapter->dev,
560 "BA setup with unknown TDLS peer %pM!\n",
561 peer_mac);
562 return -1;
563 }
564 if (sta_ptr->is_11ac_enabled)
565 tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
566 }
567
568 block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
569 tx_win_size << BLOCKACKPARAM_WINSIZE_POS |
570 IMMEDIATE_BLOCK_ACK);
571
572 /* enable AMSDU inside AMPDU */
573 if (priv->add_ba_param.tx_amsdu &&
574 (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
575 block_ack_param_set |= BLOCKACKPARAM_AMSDU_SUPP_MASK;
576
577 add_ba_req.block_ack_param_set = cpu_to_le16(block_ack_param_set);
540 add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout); 578 add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
541 579
542 ++dialog_tok; 580 ++dialog_tok;
@@ -548,8 +586,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
548 memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN); 586 memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN);
549 587
550 /* We don't wait for the response of this command */ 588 /* We don't wait for the response of this command */
551 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_REQ, 589 ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_REQ,
552 0, 0, &add_ba_req); 590 0, 0, &add_ba_req, false);
553 591
554 return ret; 592 return ret;
555} 593}
@@ -576,8 +614,8 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
576 memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN); 614 memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
577 615
578 /* We don't wait for the response of this command */ 616 /* We don't wait for the response of this command */
579 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 617 ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA,
580 HostCmd_ACT_GEN_SET, 0, &delba); 618 HostCmd_ACT_GEN_SET, 0, &delba, false);
581 619
582 return ret; 620 return ret;
583} 621}
@@ -651,6 +689,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
651 dev_dbg(priv->adapter->dev, "data: %s tid=%d\n", 689 dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
652 __func__, rx_reo_tbl->tid); 690 __func__, rx_reo_tbl->tid);
653 memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN); 691 memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
692 rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
654 rx_reo_tbl++; 693 rx_reo_tbl++;
655 count++; 694 count++;
656 if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED) 695 if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
@@ -706,5 +745,8 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
706 MWIFIEX_STA_AMPDU_DEF_RXWINSIZE; 745 MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
707 } 746 }
708 747
748 priv->add_ba_param.tx_amsdu = true;
749 priv->add_ba_param.rx_amsdu = true;
750
709 return; 751 return;
710} 752}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 375db01442bf..40b007a00f4b 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -34,8 +34,8 @@ int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
34int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, 34int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
35 struct mwifiex_bssdescriptor *bss_desc, 35 struct mwifiex_bssdescriptor *bss_desc,
36 u8 **buffer); 36 u8 **buffer);
37void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type, 37int mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
38 struct mwifiex_ie_types_htcap *); 38 struct ieee80211_ht_cap *);
39int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv, 39int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
40 u16 action, int *htcap_cfg); 40 u16 action, int *htcap_cfg);
41void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv, 41void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
@@ -64,14 +64,46 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
64 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl); 64 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
65void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra); 65void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
66 66
67/*
68 * This function checks whether AMPDU is allowed or not for a particular TID.
69 */
70static inline u8 67static inline u8
71mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid) 68mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
69 struct mwifiex_ra_list_tbl *ptr, int tid)
72{ 70{
73 return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED) 71 struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
74 ? true : false); 72
73 if (unlikely(!node))
74 return false;
75
76 return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
77}
78
79/* This function checks whether AMSDU is allowed for BA stream. */
80static inline u8
81mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
82 struct mwifiex_ra_list_tbl *ptr, int tid)
83{
84 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
85
86 tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
87 if (tx_tbl)
88 return tx_tbl->amsdu;
89
90 return false;
91}
92
93/* This function checks whether AMPDU is allowed or not for a particular TID. */
94static inline u8
95mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
96 struct mwifiex_ra_list_tbl *ptr, int tid)
97{
98 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
99 return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
100 } else {
101 if (ptr->tdls_link)
102 return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
103
104 return (priv->aggr_prio_tbl[tid].ampdu_ap !=
105 BA_STREAM_NOT_ALLOWED) ? true : false;
106 }
75} 107}
76 108
77/* 109/*
@@ -165,4 +197,14 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
165 197
166 return node->is_11n_enabled; 198 return node->is_11n_enabled;
167} 199}
200
201static inline u8
202mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
203{
204 struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
205 if (node)
206 return node->is_11n_enabled;
207
208 return false;
209}
168#endif /* !_MWIFIEX_11N_H_ */ 210#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index ada809f576fe..0c3571f830b0 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -26,6 +26,56 @@
26#include "11n.h" 26#include "11n.h"
27#include "11n_rxreorder.h" 27#include "11n_rxreorder.h"
28 28
29/* This function will dispatch amsdu packet and forward it to kernel/upper
30 * layer.
31 */
32static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
33 struct sk_buff *skb)
34{
35 struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
36 int ret;
37
38 if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
39 struct sk_buff_head list;
40 struct sk_buff *rx_skb;
41
42 __skb_queue_head_init(&list);
43
44 skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
45 skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
46
47 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
48 priv->wdev->iftype, 0, false);
49
50 while (!skb_queue_empty(&list)) {
51 rx_skb = __skb_dequeue(&list);
52 ret = mwifiex_recv_packet(priv, rx_skb);
53 if (ret == -1)
54 dev_err(priv->adapter->dev,
55 "Rx of A-MSDU failed");
56 }
57 return 0;
58 }
59
60 return -1;
61}
62
63/* This function will process the rx packet and forward it to kernel/upper
64 * layer.
65 */
66static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
67{
68 int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
69
70 if (!ret)
71 return 0;
72
73 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
74 return mwifiex_handle_uap_rx_forward(priv, payload);
75
76 return mwifiex_process_rx_packet(priv, payload);
77}
78
29/* 79/*
30 * This function dispatches all packets in the Rx reorder table until the 80 * This function dispatches all packets in the Rx reorder table until the
31 * start window. 81 * start window.
@@ -35,8 +85,9 @@
35 * circular buffer. 85 * circular buffer.
36 */ 86 */
37static void 87static void
38mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, 88mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
39 struct mwifiex_rx_reorder_tbl *tbl, int start_win) 89 struct mwifiex_rx_reorder_tbl *tbl,
90 int start_win)
40{ 91{
41 int pkt_to_send, i; 92 int pkt_to_send, i;
42 void *rx_tmp_ptr; 93 void *rx_tmp_ptr;
@@ -54,12 +105,8 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
54 tbl->rx_reorder_ptr[i] = NULL; 105 tbl->rx_reorder_ptr[i] = NULL;
55 } 106 }
56 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); 107 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
57 if (rx_tmp_ptr) { 108 if (rx_tmp_ptr)
58 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 109 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
59 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
60 else
61 mwifiex_process_rx_packet(priv, rx_tmp_ptr);
62 }
63 } 110 }
64 111
65 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 112 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -101,11 +148,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
101 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; 148 rx_tmp_ptr = tbl->rx_reorder_ptr[i];
102 tbl->rx_reorder_ptr[i] = NULL; 149 tbl->rx_reorder_ptr[i] = NULL;
103 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); 150 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
104 151 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
105 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
106 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
107 else
108 mwifiex_process_rx_packet(priv, rx_tmp_ptr);
109 } 152 }
110 153
111 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 154 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -135,14 +178,15 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
135 struct mwifiex_rx_reorder_tbl *tbl) 178 struct mwifiex_rx_reorder_tbl *tbl)
136{ 179{
137 unsigned long flags; 180 unsigned long flags;
181 int start_win;
138 182
139 if (!tbl) 183 if (!tbl)
140 return; 184 return;
141 185
142 mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) & 186 start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
143 (MAX_TID_VALUE - 1)); 187 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
144 188
145 del_timer(&tbl->timer_context.timer); 189 del_timer_sync(&tbl->timer_context.timer);
146 190
147 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 191 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
148 list_del(&tbl->list); 192 list_del(&tbl->list);
@@ -228,17 +272,17 @@ mwifiex_flush_data(unsigned long context)
228{ 272{
229 struct reorder_tmr_cnxt *ctx = 273 struct reorder_tmr_cnxt *ctx =
230 (struct reorder_tmr_cnxt *) context; 274 (struct reorder_tmr_cnxt *) context;
231 int start_win; 275 int start_win, seq_num;
232 276
233 start_win = mwifiex_11n_find_last_seq_num(ctx->ptr); 277 seq_num = mwifiex_11n_find_last_seq_num(ctx->ptr);
234 278
235 if (start_win < 0) 279 if (seq_num < 0)
236 return; 280 return;
237 281
238 dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win); 282 dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
239 mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr, 283 start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
240 (ctx->ptr->start_win + start_win + 1) & 284 mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
241 (MAX_TID_VALUE - 1)); 285 start_win);
242} 286}
243 287
244/* 288/*
@@ -267,7 +311,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
267 */ 311 */
268 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 312 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
269 if (tbl) { 313 if (tbl) {
270 mwifiex_11n_dispatch_pkt(priv, tbl, seq_num); 314 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
271 return; 315 return;
272 } 316 }
273 /* if !tbl then create one */ 317 /* if !tbl then create one */
@@ -279,6 +323,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
279 new_node->tid = tid; 323 new_node->tid = tid;
280 memcpy(new_node->ta, ta, ETH_ALEN); 324 memcpy(new_node->ta, ta, ETH_ALEN);
281 new_node->start_win = seq_num; 325 new_node->start_win = seq_num;
326 new_node->init_win = seq_num;
327 new_node->flags = 0;
282 328
283 if (mwifiex_queuing_ra_based(priv)) { 329 if (mwifiex_queuing_ra_based(priv)) {
284 dev_dbg(priv->adapter->dev, 330 dev_dbg(priv->adapter->dev,
@@ -290,15 +336,20 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
290 last_seq = node->rx_seq[tid]; 336 last_seq = node->rx_seq[tid];
291 } 337 }
292 } else { 338 } else {
293 last_seq = priv->rx_seq[tid]; 339 node = mwifiex_get_sta_entry(priv, ta);
340 if (node)
341 last_seq = node->rx_seq[tid];
342 else
343 last_seq = priv->rx_seq[tid];
294 } 344 }
295 345
296 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && 346 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
297 last_seq >= new_node->start_win) 347 last_seq >= new_node->start_win) {
298 new_node->start_win = last_seq + 1; 348 new_node->start_win = last_seq + 1;
349 new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
350 }
299 351
300 new_node->win_size = win_size; 352 new_node->win_size = win_size;
301 new_node->flags = 0;
302 353
303 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size, 354 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
304 GFP_KERNEL); 355 GFP_KERNEL);
@@ -358,10 +409,28 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
358 *cmd_addba_req) 409 *cmd_addba_req)
359{ 410{
360 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp; 411 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
412 struct mwifiex_sta_node *sta_ptr;
413 u32 rx_win_size = priv->add_ba_param.rx_win_size;
361 u8 tid; 414 u8 tid;
362 int win_size; 415 int win_size;
363 uint16_t block_ack_param_set; 416 uint16_t block_ack_param_set;
364 417
418 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
419 ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
420 priv->adapter->is_hw_11ac_capable &&
421 memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
422 sta_ptr = mwifiex_get_sta_entry(priv,
423 cmd_addba_req->peer_mac_addr);
424 if (!sta_ptr) {
425 dev_warn(priv->adapter->dev,
426 "BA setup with unknown TDLS peer %pM!\n",
427 cmd_addba_req->peer_mac_addr);
428 return -1;
429 }
430 if (sta_ptr->is_11ac_enabled)
431 rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
432 }
433
365 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP); 434 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
366 cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN); 435 cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
367 436
@@ -376,10 +445,12 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
376 >> BLOCKACKPARAM_TID_POS; 445 >> BLOCKACKPARAM_TID_POS;
377 add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT); 446 add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
378 block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK; 447 block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
379 /* We donot support AMSDU inside AMPDU, hence reset the bit */ 448
380 block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK; 449 /* If we don't support AMSDU inside AMPDU, reset the bit */
381 block_ack_param_set |= (priv->add_ba_param.rx_win_size << 450 if (!priv->add_ba_param.rx_amsdu ||
382 BLOCKACKPARAM_WINSIZE_POS); 451 (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
452 block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
453 block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
383 add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set); 454 add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
384 win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set) 455 win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
385 & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) 456 & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
@@ -431,33 +502,46 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
431 struct mwifiex_rx_reorder_tbl *tbl; 502 struct mwifiex_rx_reorder_tbl *tbl;
432 int start_win, end_win, win_size; 503 int start_win, end_win, win_size;
433 u16 pkt_index; 504 u16 pkt_index;
505 bool init_window_shift = false;
434 506
435 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 507 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
436 if (!tbl) { 508 if (!tbl) {
437 if (pkt_type != PKT_TYPE_BAR) { 509 if (pkt_type != PKT_TYPE_BAR)
438 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 510 mwifiex_11n_dispatch_pkt(priv, payload);
439 mwifiex_handle_uap_rx_forward(priv, payload);
440 else
441 mwifiex_process_rx_packet(priv, payload);
442 }
443 return 0; 511 return 0;
444 } 512 }
513
514 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
515 mwifiex_11n_dispatch_pkt(priv, payload);
516 return 0;
517 }
518
445 start_win = tbl->start_win; 519 start_win = tbl->start_win;
446 win_size = tbl->win_size; 520 win_size = tbl->win_size;
447 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); 521 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
448 del_timer(&tbl->timer_context.timer); 522 if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
523 init_window_shift = true;
524 tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
525 }
449 mod_timer(&tbl->timer_context.timer, 526 mod_timer(&tbl->timer_context.timer,
450 jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size)); 527 jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
451 528
452 /*
453 * If seq_num is less then starting win then ignore and drop the
454 * packet
455 */
456 if (tbl->flags & RXREOR_FORCE_NO_DROP) { 529 if (tbl->flags & RXREOR_FORCE_NO_DROP) {
457 dev_dbg(priv->adapter->dev, 530 dev_dbg(priv->adapter->dev,
458 "RXREOR_FORCE_NO_DROP when HS is activated\n"); 531 "RXREOR_FORCE_NO_DROP when HS is activated\n");
459 tbl->flags &= ~RXREOR_FORCE_NO_DROP; 532 tbl->flags &= ~RXREOR_FORCE_NO_DROP;
533 } else if (init_window_shift && seq_num < start_win &&
534 seq_num >= tbl->init_win) {
535 dev_dbg(priv->adapter->dev,
536 "Sender TID sequence number reset %d->%d for SSN %d\n",
537 start_win, seq_num, tbl->init_win);
538 tbl->start_win = start_win = seq_num;
539 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
460 } else { 540 } else {
541 /*
542 * If seq_num is less then starting win then ignore and drop
543 * the packet
544 */
461 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) { 545 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
462 if (seq_num >= ((start_win + TWOPOW11) & 546 if (seq_num >= ((start_win + TWOPOW11) &
463 (MAX_TID_VALUE - 1)) && 547 (MAX_TID_VALUE - 1)) &&
@@ -485,7 +569,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
485 start_win = (end_win - win_size) + 1; 569 start_win = (end_win - win_size) + 1;
486 else 570 else
487 start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1; 571 start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
488 mwifiex_11n_dispatch_pkt(priv, tbl, start_win); 572 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
489 } 573 }
490 574
491 if (pkt_type != PKT_TYPE_BAR) { 575 if (pkt_type != PKT_TYPE_BAR) {
@@ -576,16 +660,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
576 * Check if we had rejected the ADDBA, if yes then do not create 660 * Check if we had rejected the ADDBA, if yes then do not create
577 * the stream 661 * the stream
578 */ 662 */
579 if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) { 663 if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
580 win_size = (block_ack_param_set &
581 IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
582 >> BLOCKACKPARAM_WINSIZE_POS;
583
584 dev_dbg(priv->adapter->dev,
585 "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
586 add_ba_rsp->peer_mac_addr, tid,
587 add_ba_rsp->ssn, win_size);
588 } else {
589 dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n", 664 dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
590 add_ba_rsp->peer_mac_addr, tid); 665 add_ba_rsp->peer_mac_addr, tid);
591 666
@@ -593,8 +668,28 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
593 add_ba_rsp->peer_mac_addr); 668 add_ba_rsp->peer_mac_addr);
594 if (tbl) 669 if (tbl)
595 mwifiex_del_rx_reorder_entry(priv, tbl); 670 mwifiex_del_rx_reorder_entry(priv, tbl);
671
672 return 0;
596 } 673 }
597 674
675 win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
676 >> BLOCKACKPARAM_WINSIZE_POS;
677
678 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
679 add_ba_rsp->peer_mac_addr);
680 if (tbl) {
681 if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
682 priv->add_ba_param.rx_amsdu &&
683 (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
684 tbl->amsdu = true;
685 else
686 tbl->amsdu = false;
687 }
688
689 dev_dbg(priv->adapter->dev,
690 "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
691 add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
692
598 return 0; 693 return 0;
599} 694}
600 695
@@ -615,7 +710,7 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
615 delba.del_ba_param_set |= cpu_to_le16( 710 delba.del_ba_param_set |= cpu_to_le16(
616 (u16) event->origninator << DELBA_INITIATOR_POS); 711 (u16) event->origninator << DELBA_INITIATOR_POS);
617 delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT); 712 delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
618 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba); 713 mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
619} 714}
620 715
621/* 716/*
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 4064041ac852..0fc76e4a60f8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -42,7 +42,8 @@
42#define BA_SETUP_PACKET_OFFSET 16 42#define BA_SETUP_PACKET_OFFSET 16
43 43
44enum mwifiex_rxreor_flags { 44enum mwifiex_rxreor_flags {
45 RXREOR_FORCE_NO_DROP = 1<<0, 45 RXREOR_FORCE_NO_DROP = 1<<0,
46 RXREOR_INIT_WINDOW_SHIFT = 1<<1,
46}; 47};
47 48
48static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv) 49static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index a42a506fd32b..2aa208ffbe23 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -41,6 +41,7 @@ mwifiex-y += uap_txrx.o
41mwifiex-y += cfg80211.o 41mwifiex-y += cfg80211.o
42mwifiex-y += ethtool.o 42mwifiex-y += ethtool.o
43mwifiex-y += 11h.o 43mwifiex-y += 11h.o
44mwifiex-y += tdls.o
44mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o 45mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
45obj-$(CONFIG_MWIFIEX) += mwifiex.o 46obj-$(CONFIG_MWIFIEX) += mwifiex.o
46 47
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index 3d64613ebb29..b9242c3dca43 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -131,7 +131,7 @@ info
131 hs_configured = <0/1, host sleep not configured/configured> 131 hs_configured = <0/1, host sleep not configured/configured>
132 hs_activated = <0/1, extended host sleep not activated/activated> 132 hs_activated = <0/1, extended host sleep not activated/activated>
133 num_tx_timeout = <number of Tx timeout> 133 num_tx_timeout = <number of Tx timeout>
134 num_cmd_timeout = <number of timeout commands> 134 is_cmd_timedout = <0/1 command timeout not occurred/occurred>
135 timeout_cmd_id = <command id of the last timeout command> 135 timeout_cmd_id = <command id of the last timeout command>
136 timeout_cmd_act = <command action of the last timeout command> 136 timeout_cmd_act = <command action of the last timeout command>
137 last_cmd_id = <command id of the last several commands sent to device> 137 last_cmd_id = <command id of the last several commands sent to device>
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8bfc07cd330e..21ee27ab7b74 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -252,9 +252,9 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
252 252
253 if (mask != priv->mgmt_frame_mask) { 253 if (mask != priv->mgmt_frame_mask) {
254 priv->mgmt_frame_mask = mask; 254 priv->mgmt_frame_mask = mask;
255 mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG, 255 mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
256 HostCmd_ACT_GEN_SET, 0, 256 HostCmd_ACT_GEN_SET, 0,
257 &priv->mgmt_frame_mask); 257 &priv->mgmt_frame_mask, false);
258 wiphy_dbg(wiphy, "info: mgmt frame registered\n"); 258 wiphy_dbg(wiphy, "info: mgmt frame registered\n");
259 } 259 }
260} 260}
@@ -515,8 +515,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
515 515
516 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 516 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
517 517
518 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, 518 if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
519 HostCmd_ACT_GEN_SET, 0, NULL)) { 519 HostCmd_ACT_GEN_SET, 0, NULL, false)) {
520 wiphy_err(wiphy, "11D: setting domain info in FW\n"); 520 wiphy_err(wiphy, "11D: setting domain info in FW\n");
521 return -1; 521 return -1;
522 } 522 }
@@ -580,9 +580,9 @@ mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
580 frag_thr > MWIFIEX_FRAG_MAX_VALUE) 580 frag_thr > MWIFIEX_FRAG_MAX_VALUE)
581 frag_thr = MWIFIEX_FRAG_MAX_VALUE; 581 frag_thr = MWIFIEX_FRAG_MAX_VALUE;
582 582
583 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 583 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
584 HostCmd_ACT_GEN_SET, FRAG_THRESH_I, 584 HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
585 &frag_thr); 585 &frag_thr, true);
586} 586}
587 587
588/* 588/*
@@ -597,9 +597,9 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
597 if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE) 597 if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE)
598 rts_thr = MWIFIEX_RTS_MAX_VALUE; 598 rts_thr = MWIFIEX_RTS_MAX_VALUE;
599 599
600 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 600 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
601 HostCmd_ACT_GEN_SET, RTS_THRESH_I, 601 HostCmd_ACT_GEN_SET, RTS_THRESH_I,
602 &rts_thr); 602 &rts_thr, true);
603} 603}
604 604
605/* 605/*
@@ -637,20 +637,19 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
637 637
638 bss_started = priv->bss_started; 638 bss_started = priv->bss_started;
639 639
640 ret = mwifiex_send_cmd_sync(priv, 640 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
641 HostCmd_CMD_UAP_BSS_STOP, 641 HostCmd_ACT_GEN_SET, 0,
642 HostCmd_ACT_GEN_SET, 0, 642 NULL, true);
643 NULL);
644 if (ret) { 643 if (ret) {
645 wiphy_err(wiphy, "Failed to stop the BSS\n"); 644 wiphy_err(wiphy, "Failed to stop the BSS\n");
646 kfree(bss_cfg); 645 kfree(bss_cfg);
647 return ret; 646 return ret;
648 } 647 }
649 648
650 ret = mwifiex_send_cmd_async(priv, 649 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
651 HostCmd_CMD_UAP_SYS_CONFIG, 650 HostCmd_ACT_GEN_SET,
652 HostCmd_ACT_GEN_SET, 651 UAP_BSS_PARAMS_I, bss_cfg,
653 UAP_BSS_PARAMS_I, bss_cfg); 652 false);
654 653
655 kfree(bss_cfg); 654 kfree(bss_cfg);
656 655
@@ -662,10 +661,9 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
662 if (!bss_started) 661 if (!bss_started)
663 break; 662 break;
664 663
665 ret = mwifiex_send_cmd_async(priv, 664 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
666 HostCmd_CMD_UAP_BSS_START, 665 HostCmd_ACT_GEN_SET, 0,
667 HostCmd_ACT_GEN_SET, 0, 666 NULL, false);
668 NULL);
669 if (ret) { 667 if (ret) {
670 wiphy_err(wiphy, "Failed to start BSS\n"); 668 wiphy_err(wiphy, "Failed to start BSS\n");
671 return ret; 669 return ret;
@@ -700,8 +698,8 @@ mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
700 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) 698 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
701 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA); 699 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
702 700
703 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG, 701 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
704 HostCmd_ACT_GEN_SET, 0, &mode)) 702 HostCmd_ACT_GEN_SET, 0, &mode, true))
705 return -1; 703 return -1;
706 704
707 return 0; 705 return 0;
@@ -721,13 +719,13 @@ mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
721 return -1; 719 return -1;
722 720
723 mode = P2P_MODE_DEVICE; 721 mode = P2P_MODE_DEVICE;
724 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG, 722 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
725 HostCmd_ACT_GEN_SET, 0, &mode)) 723 HostCmd_ACT_GEN_SET, 0, &mode, true))
726 return -1; 724 return -1;
727 725
728 mode = P2P_MODE_CLIENT; 726 mode = P2P_MODE_CLIENT;
729 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG, 727 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
730 HostCmd_ACT_GEN_SET, 0, &mode)) 728 HostCmd_ACT_GEN_SET, 0, &mode, true))
731 return -1; 729 return -1;
732 730
733 return 0; 731 return 0;
@@ -747,13 +745,13 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
747 return -1; 745 return -1;
748 746
749 mode = P2P_MODE_DEVICE; 747 mode = P2P_MODE_DEVICE;
750 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG, 748 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
751 HostCmd_ACT_GEN_SET, 0, &mode)) 749 HostCmd_ACT_GEN_SET, 0, &mode, true))
752 return -1; 750 return -1;
753 751
754 mode = P2P_MODE_GO; 752 mode = P2P_MODE_GO;
755 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG, 753 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
756 HostCmd_ACT_GEN_SET, 0, &mode)) 754 HostCmd_ACT_GEN_SET, 0, &mode, true))
757 return -1; 755 return -1;
758 756
759 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) 757 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
@@ -853,8 +851,8 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
853 851
854 priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM; 852 priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
855 853
856 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE, 854 ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
857 HostCmd_ACT_GEN_SET, 0, NULL); 855 HostCmd_ACT_GEN_SET, 0, NULL, true);
858 856
859 return ret; 857 return ret;
860} 858}
@@ -942,8 +940,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
942 STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG; 940 STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
943 941
944 /* Get signal information from the firmware */ 942 /* Get signal information from the firmware */
945 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO, 943 if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
946 HostCmd_ACT_GEN_GET, 0, NULL)) { 944 HostCmd_ACT_GEN_GET, 0, NULL, true)) {
947 dev_err(priv->adapter->dev, "failed to get signal information\n"); 945 dev_err(priv->adapter->dev, "failed to get signal information\n");
948 return -EFAULT; 946 return -EFAULT;
949 } 947 }
@@ -954,9 +952,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
954 } 952 }
955 953
956 /* Get DTIM period information from firmware */ 954 /* Get DTIM period information from firmware */
957 mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 955 mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
958 HostCmd_ACT_GEN_GET, DTIM_PERIOD_I, 956 HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
959 &priv->dtim_period); 957 &priv->dtim_period, true);
960 958
961 mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate); 959 mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
962 960
@@ -1160,9 +1158,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
1160 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1158 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1161 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE]; 1159 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
1162 enum ieee80211_band band; 1160 enum ieee80211_band band;
1161 struct mwifiex_adapter *adapter = priv->adapter;
1163 1162
1164 if (!priv->media_connected) { 1163 if (!priv->media_connected) {
1165 dev_err(priv->adapter->dev, 1164 dev_err(adapter->dev,
1166 "Can not set Tx data rate in disconnected state\n"); 1165 "Can not set Tx data rate in disconnected state\n");
1167 return -EINVAL; 1166 return -EINVAL;
1168 } 1167 }
@@ -1183,11 +1182,18 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
1183 1182
1184 /* Fill HT MCS rates */ 1183 /* Fill HT MCS rates */
1185 bitmap_rates[2] = mask->control[band].ht_mcs[0]; 1184 bitmap_rates[2] = mask->control[band].ht_mcs[0];
1186 if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) 1185 if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
1187 bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8; 1186 bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
1188 1187
1189 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG, 1188 /* Fill VHT MCS rates */
1190 HostCmd_ACT_GEN_SET, 0, bitmap_rates); 1189 if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
1190 bitmap_rates[10] = mask->control[band].vht_mcs[0];
1191 if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
1192 bitmap_rates[11] = mask->control[band].vht_mcs[1];
1193 }
1194
1195 return mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
1196 HostCmd_ACT_GEN_SET, 0, bitmap_rates, true);
1191} 1197}
1192 1198
1193/* 1199/*
@@ -1216,14 +1222,14 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
1216 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold); 1222 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
1217 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1; 1223 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
1218 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1; 1224 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
1219 return mwifiex_send_cmd_sync(priv, 1225 return mwifiex_send_cmd(priv,
1220 HostCmd_CMD_802_11_SUBSCRIBE_EVENT, 1226 HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
1221 0, 0, &subsc_evt); 1227 0, 0, &subsc_evt, true);
1222 } else { 1228 } else {
1223 subsc_evt.action = HostCmd_ACT_BITWISE_CLR; 1229 subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
1224 return mwifiex_send_cmd_sync(priv, 1230 return mwifiex_send_cmd(priv,
1225 HostCmd_CMD_802_11_SUBSCRIBE_EVENT, 1231 HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
1226 0, 0, &subsc_evt); 1232 0, 0, &subsc_evt, true);
1227 } 1233 }
1228 1234
1229 return 0; 1235 return 0;
@@ -1276,10 +1282,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1276 if (!mac || is_broadcast_ether_addr(mac)) { 1282 if (!mac || is_broadcast_ether_addr(mac)) {
1277 wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__); 1283 wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
1278 list_for_each_entry(sta_node, &priv->sta_list, list) { 1284 list_for_each_entry(sta_node, &priv->sta_list, list) {
1279 if (mwifiex_send_cmd_sync(priv, 1285 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
1280 HostCmd_CMD_UAP_STA_DEAUTH, 1286 HostCmd_ACT_GEN_SET, 0,
1281 HostCmd_ACT_GEN_SET, 0, 1287 sta_node->mac_addr, true))
1282 sta_node->mac_addr))
1283 return -1; 1288 return -1;
1284 mwifiex_uap_del_sta_data(priv, sta_node); 1289 mwifiex_uap_del_sta_data(priv, sta_node);
1285 } 1290 }
@@ -1289,10 +1294,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1289 sta_node = mwifiex_get_sta_entry(priv, mac); 1294 sta_node = mwifiex_get_sta_entry(priv, mac);
1290 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); 1295 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
1291 if (sta_node) { 1296 if (sta_node) {
1292 if (mwifiex_send_cmd_sync(priv, 1297 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
1293 HostCmd_CMD_UAP_STA_DEAUTH, 1298 HostCmd_ACT_GEN_SET, 0,
1294 HostCmd_ACT_GEN_SET, 0, 1299 sta_node->mac_addr, true))
1295 sta_node->mac_addr))
1296 return -1; 1300 return -1;
1297 mwifiex_uap_del_sta_data(priv, sta_node); 1301 mwifiex_uap_del_sta_data(priv, sta_node);
1298 } 1302 }
@@ -1328,13 +1332,40 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
1328 tx_ant = RF_ANTENNA_AUTO; 1332 tx_ant = RF_ANTENNA_AUTO;
1329 rx_ant = RF_ANTENNA_AUTO; 1333 rx_ant = RF_ANTENNA_AUTO;
1330 } 1334 }
1335 } else {
1336 struct ieee80211_sta_ht_cap *ht_info;
1337 int rx_mcs_supp;
1338 enum ieee80211_band band;
1339
1340 if ((tx_ant == 0x1 && rx_ant == 0x1)) {
1341 adapter->user_dev_mcs_support = HT_STREAM_1X1;
1342 if (adapter->is_hw_11ac_capable)
1343 adapter->usr_dot_11ac_mcs_support =
1344 MWIFIEX_11AC_MCS_MAP_1X1;
1345 } else {
1346 adapter->user_dev_mcs_support = HT_STREAM_2X2;
1347 if (adapter->is_hw_11ac_capable)
1348 adapter->usr_dot_11ac_mcs_support =
1349 MWIFIEX_11AC_MCS_MAP_2X2;
1350 }
1351
1352 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1353 if (!adapter->wiphy->bands[band])
1354 continue;
1355
1356 ht_info = &adapter->wiphy->bands[band]->ht_cap;
1357 rx_mcs_supp =
1358 GET_RXMCSSUPP(adapter->user_dev_mcs_support);
1359 memset(&ht_info->mcs, 0, adapter->number_of_antenna);
1360 memset(&ht_info->mcs, 0xff, rx_mcs_supp);
1361 }
1331 } 1362 }
1332 1363
1333 ant_cfg.tx_ant = tx_ant; 1364 ant_cfg.tx_ant = tx_ant;
1334 ant_cfg.rx_ant = rx_ant; 1365 ant_cfg.rx_ant = rx_ant;
1335 1366
1336 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA, 1367 return mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA,
1337 HostCmd_ACT_GEN_SET, 0, &ant_cfg); 1368 HostCmd_ACT_GEN_SET, 0, &ant_cfg, true);
1338} 1369}
1339 1370
1340/* cfg80211 operation handler for stop ap. 1371/* cfg80211 operation handler for stop ap.
@@ -1349,8 +1380,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1349 1380
1350 priv->ap_11n_enabled = 0; 1381 priv->ap_11n_enabled = 0;
1351 1382
1352 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1383 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
1353 HostCmd_ACT_GEN_SET, 0, NULL)) { 1384 HostCmd_ACT_GEN_SET, 0, NULL, true)) {
1354 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1385 wiphy_err(wiphy, "Failed to stop the BSS\n");
1355 return -1; 1386 return -1;
1356 } 1387 }
@@ -1416,9 +1447,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1416 1447
1417 if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT) 1448 if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
1418 config_bands |= BAND_GN; 1449 config_bands |= BAND_GN;
1419
1420 if (params->chandef.width > NL80211_CHAN_WIDTH_40)
1421 config_bands |= BAND_GAC;
1422 } else { 1450 } else {
1423 bss_cfg->band_cfg = BAND_CONFIG_A; 1451 bss_cfg->band_cfg = BAND_CONFIG_A;
1424 config_bands = BAND_A; 1452 config_bands = BAND_A;
@@ -1464,16 +1492,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1464 bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout; 1492 bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
1465 } 1493 }
1466 1494
1467 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1495 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
1468 HostCmd_ACT_GEN_SET, 0, NULL)) { 1496 HostCmd_ACT_GEN_SET, 0, NULL, true)) {
1469 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1497 wiphy_err(wiphy, "Failed to stop the BSS\n");
1470 kfree(bss_cfg); 1498 kfree(bss_cfg);
1471 return -1; 1499 return -1;
1472 } 1500 }
1473 1501
1474 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG, 1502 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
1475 HostCmd_ACT_GEN_SET, 1503 HostCmd_ACT_GEN_SET,
1476 UAP_BSS_PARAMS_I, bss_cfg)) { 1504 UAP_BSS_PARAMS_I, bss_cfg, false)) {
1477 wiphy_err(wiphy, "Failed to set the SSID\n"); 1505 wiphy_err(wiphy, "Failed to set the SSID\n");
1478 kfree(bss_cfg); 1506 kfree(bss_cfg);
1479 return -1; 1507 return -1;
@@ -1481,8 +1509,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1481 1509
1482 kfree(bss_cfg); 1510 kfree(bss_cfg);
1483 1511
1484 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_BSS_START, 1512 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
1485 HostCmd_ACT_GEN_SET, 0, NULL)) { 1513 HostCmd_ACT_GEN_SET, 0, NULL, false)) {
1486 wiphy_err(wiphy, "Failed to start the BSS\n"); 1514 wiphy_err(wiphy, "Failed to start the BSS\n");
1487 return -1; 1515 return -1;
1488 } 1516 }
@@ -1492,9 +1520,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1492 else 1520 else
1493 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE; 1521 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
1494 1522
1495 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL, 1523 if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1496 HostCmd_ACT_GEN_SET, 0, 1524 HostCmd_ACT_GEN_SET, 0,
1497 &priv->curr_pkt_filter)) 1525 &priv->curr_pkt_filter, true))
1498 return -1; 1526 return -1;
1499 1527
1500 return 0; 1528 return 0;
@@ -1583,8 +1611,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
1583 * the function notifies the CFG802.11 subsystem of the new BSS connection. 1611 * the function notifies the CFG802.11 subsystem of the new BSS connection.
1584 */ 1612 */
1585static int 1613static int
1586mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid, 1614mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
1587 u8 *bssid, int mode, struct ieee80211_channel *channel, 1615 const u8 *ssid, const u8 *bssid, int mode,
1616 struct ieee80211_channel *channel,
1588 struct cfg80211_connect_params *sme, bool privacy) 1617 struct cfg80211_connect_params *sme, bool privacy)
1589{ 1618{
1590 struct cfg80211_ssid req_ssid; 1619 struct cfg80211_ssid req_ssid;
@@ -1881,7 +1910,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1881 params->privacy); 1910 params->privacy);
1882done: 1911done:
1883 if (!ret) { 1912 if (!ret) {
1884 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL); 1913 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
1914 params->chandef.chan, GFP_KERNEL);
1885 dev_dbg(priv->adapter->dev, 1915 dev_dbg(priv->adapter->dev,
1886 "info: joined/created adhoc network with bssid" 1916 "info: joined/created adhoc network with bssid"
1887 " %pM successfully\n", priv->cfg_bssid); 1917 " %pM successfully\n", priv->cfg_bssid);
@@ -2070,10 +2100,10 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
2070 else 2100 else
2071 ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40; 2101 ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
2072 2102
2073 if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap)) 2103 if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
2074 ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT; 2104 ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
2075 else 2105 else
2076 ht_info->cap &= ~(3 << IEEE80211_HT_CAP_RX_STBC_SHIFT); 2106 ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
2077 2107
2078 if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap)) 2108 if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap))
2079 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; 2109 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
@@ -2098,8 +2128,8 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
2098 ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU; 2128 ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
2099 ht_info->cap |= IEEE80211_HT_CAP_SM_PS; 2129 ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
2100 2130
2101 rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support); 2131 rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
2102 /* Set MCS for 1x1 */ 2132 /* Set MCS for 1x1/2x2 */
2103 memset(mcs, 0xff, rx_mcs_supp); 2133 memset(mcs, 0xff, rx_mcs_supp);
2104 /* Clear all the other values */ 2134 /* Clear all the other values */
2105 memset(&mcs[rx_mcs_supp], 0, 2135 memset(&mcs[rx_mcs_supp], 0,
@@ -2460,9 +2490,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2460 MWIFIEX_CRITERIA_UNICAST | 2490 MWIFIEX_CRITERIA_UNICAST |
2461 MWIFIEX_CRITERIA_MULTICAST; 2491 MWIFIEX_CRITERIA_MULTICAST;
2462 2492
2463 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG, 2493 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
2464 HostCmd_ACT_GEN_SET, 0, 2494 HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
2465 &mef_cfg);
2466 2495
2467 kfree(mef_entry); 2496 kfree(mef_entry);
2468 return ret; 2497 return ret;
@@ -2574,9 +2603,9 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
2574 if (!coalesce) { 2603 if (!coalesce) {
2575 dev_dbg(adapter->dev, 2604 dev_dbg(adapter->dev,
2576 "Disable coalesce and reset all previous rules\n"); 2605 "Disable coalesce and reset all previous rules\n");
2577 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG, 2606 return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
2578 HostCmd_ACT_GEN_SET, 0, 2607 HostCmd_ACT_GEN_SET, 0,
2579 &coalesce_cfg); 2608 &coalesce_cfg, true);
2580 } 2609 }
2581 2610
2582 coalesce_cfg.num_of_rules = coalesce->n_rules; 2611 coalesce_cfg.num_of_rules = coalesce->n_rules;
@@ -2591,8 +2620,172 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
2591 } 2620 }
2592 } 2621 }
2593 2622
2594 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG, 2623 return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
2595 HostCmd_ACT_GEN_SET, 0, &coalesce_cfg); 2624 HostCmd_ACT_GEN_SET, 0, &coalesce_cfg, true);
2625}
2626
2627/* cfg80211 ops handler for tdls_mgmt.
2628 * Function prepares TDLS action frame packets and forwards them to FW
2629 */
2630static int
2631mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2632 u8 *peer, u8 action_code, u8 dialog_token,
2633 u16 status_code, u32 peer_capability,
2634 const u8 *extra_ies, size_t extra_ies_len)
2635{
2636 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2637 int ret;
2638
2639 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
2640 return -ENOTSUPP;
2641
2642 /* make sure we are in station mode and connected */
2643 if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
2644 return -ENOTSUPP;
2645
2646 switch (action_code) {
2647 case WLAN_TDLS_SETUP_REQUEST:
2648 dev_dbg(priv->adapter->dev,
2649 "Send TDLS Setup Request to %pM status_code=%d\n", peer,
2650 status_code);
2651 ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
2652 dialog_token, status_code,
2653 extra_ies, extra_ies_len);
2654 break;
2655 case WLAN_TDLS_SETUP_RESPONSE:
2656 dev_dbg(priv->adapter->dev,
2657 "Send TDLS Setup Response to %pM status_code=%d\n",
2658 peer, status_code);
2659 ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
2660 dialog_token, status_code,
2661 extra_ies, extra_ies_len);
2662 break;
2663 case WLAN_TDLS_SETUP_CONFIRM:
2664 dev_dbg(priv->adapter->dev,
2665 "Send TDLS Confirm to %pM status_code=%d\n", peer,
2666 status_code);
2667 ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
2668 dialog_token, status_code,
2669 extra_ies, extra_ies_len);
2670 break;
2671 case WLAN_TDLS_TEARDOWN:
2672 dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
2673 peer);
2674 ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
2675 dialog_token, status_code,
2676 extra_ies, extra_ies_len);
2677 break;
2678 case WLAN_TDLS_DISCOVERY_REQUEST:
2679 dev_dbg(priv->adapter->dev,
2680 "Send TDLS Discovery Request to %pM\n", peer);
2681 ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
2682 dialog_token, status_code,
2683 extra_ies, extra_ies_len);
2684 break;
2685 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
2686 dev_dbg(priv->adapter->dev,
2687 "Send TDLS Discovery Response to %pM\n", peer);
2688 ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
2689 dialog_token, status_code,
2690 extra_ies, extra_ies_len);
2691 break;
2692 default:
2693 dev_warn(priv->adapter->dev,
2694 "Unknown TDLS mgmt/action frame %pM\n", peer);
2695 ret = -EINVAL;
2696 break;
2697 }
2698
2699 return ret;
2700}
2701
2702static int
2703mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2704 u8 *peer, enum nl80211_tdls_operation action)
2705{
2706 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2707
2708 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
2709 !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
2710 return -ENOTSUPP;
2711
2712 /* make sure we are in station mode and connected */
2713 if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
2714 return -ENOTSUPP;
2715
2716 dev_dbg(priv->adapter->dev,
2717 "TDLS peer=%pM, oper=%d\n", peer, action);
2718
2719 switch (action) {
2720 case NL80211_TDLS_ENABLE_LINK:
2721 action = MWIFIEX_TDLS_ENABLE_LINK;
2722 break;
2723 case NL80211_TDLS_DISABLE_LINK:
2724 action = MWIFIEX_TDLS_DISABLE_LINK;
2725 break;
2726 case NL80211_TDLS_TEARDOWN:
2727 /* shouldn't happen!*/
2728 dev_warn(priv->adapter->dev,
2729 "tdls_oper: teardown from driver not supported\n");
2730 return -EINVAL;
2731 case NL80211_TDLS_SETUP:
2732 /* shouldn't happen!*/
2733 dev_warn(priv->adapter->dev,
2734 "tdls_oper: setup from driver not supported\n");
2735 return -EINVAL;
2736 case NL80211_TDLS_DISCOVERY_REQ:
2737 /* shouldn't happen!*/
2738 dev_warn(priv->adapter->dev,
2739 "tdls_oper: discovery from driver not supported\n");
2740 return -EINVAL;
2741 default:
2742 dev_err(priv->adapter->dev,
2743 "tdls_oper: operation not supported\n");
2744 return -ENOTSUPP;
2745 }
2746
2747 return mwifiex_tdls_oper(priv, peer, action);
2748}
2749
2750static int
2751mwifiex_cfg80211_add_station(struct wiphy *wiphy,
2752 struct net_device *dev,
2753 u8 *mac, struct station_parameters *params)
2754{
2755 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2756
2757 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
2758 return -ENOTSUPP;
2759
2760 /* make sure we are in station mode and connected */
2761 if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
2762 return -ENOTSUPP;
2763
2764 return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
2765}
2766
2767static int
2768mwifiex_cfg80211_change_station(struct wiphy *wiphy,
2769 struct net_device *dev,
2770 u8 *mac, struct station_parameters *params)
2771{
2772 int ret;
2773 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2774
2775 /* we support change_station handler only for TDLS peers*/
2776 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
2777 return -ENOTSUPP;
2778
2779 /* make sure we are in station mode and connected */
2780 if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
2781 return -ENOTSUPP;
2782
2783 priv->sta_params = params;
2784
2785 ret = mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CONFIG_LINK);
2786 priv->sta_params = NULL;
2787
2788 return ret;
2596} 2789}
2597 2790
2598/* station cfg80211 operations */ 2791/* station cfg80211 operations */
@@ -2630,6 +2823,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
2630 .set_wakeup = mwifiex_cfg80211_set_wakeup, 2823 .set_wakeup = mwifiex_cfg80211_set_wakeup,
2631#endif 2824#endif
2632 .set_coalesce = mwifiex_cfg80211_set_coalesce, 2825 .set_coalesce = mwifiex_cfg80211_set_coalesce,
2826 .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
2827 .tdls_oper = mwifiex_cfg80211_tdls_oper,
2828 .add_station = mwifiex_cfg80211_add_station,
2829 .change_station = mwifiex_cfg80211_change_station,
2633}; 2830};
2634 2831
2635#ifdef CONFIG_PM 2832#ifdef CONFIG_PM
@@ -2715,6 +2912,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2715 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | 2912 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
2716 WIPHY_FLAG_AP_UAPSD | 2913 WIPHY_FLAG_AP_UAPSD |
2717 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 2914 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
2915
2916 if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
2917 wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
2918 WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
2919
2718 wiphy->regulatory_flags |= 2920 wiphy->regulatory_flags |=
2719 REGULATORY_CUSTOM_REG | 2921 REGULATORY_CUSTOM_REG |
2720 REGULATORY_STRICT_REG; 2922 REGULATORY_STRICT_REG;
@@ -2736,7 +2938,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2736 2938
2737 wiphy->features |= NL80211_FEATURE_HT_IBSS | 2939 wiphy->features |= NL80211_FEATURE_HT_IBSS |
2738 NL80211_FEATURE_INACTIVITY_TIMER | 2940 NL80211_FEATURE_INACTIVITY_TIMER |
2739 NL80211_FEATURE_LOW_PRIORITY_SCAN; 2941 NL80211_FEATURE_LOW_PRIORITY_SCAN |
2942 NL80211_FEATURE_NEED_OBSS_SCAN;
2740 2943
2741 /* Reserve space for mwifiex specific private data for BSS */ 2944 /* Reserve space for mwifiex specific private data for BSS */
2742 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); 2945 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -2767,17 +2970,17 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2767 country_code); 2970 country_code);
2768 } 2971 }
2769 2972
2770 mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 2973 mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
2771 HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr); 2974 HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr, true);
2772 wiphy->frag_threshold = thr; 2975 wiphy->frag_threshold = thr;
2773 mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 2976 mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
2774 HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr); 2977 HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr, true);
2775 wiphy->rts_threshold = thr; 2978 wiphy->rts_threshold = thr;
2776 mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 2979 mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
2777 HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry); 2980 HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry, true);
2778 wiphy->retry_short = (u8) retry; 2981 wiphy->retry_short = (u8) retry;
2779 mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 2982 mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
2780 HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry); 2983 HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry, true);
2781 wiphy->retry_long = (u8) retry; 2984 wiphy->retry_long = (u8) retry;
2782 2985
2783 adapter->wiphy = wiphy; 2986 adapter->wiphy = wiphy;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 9eefacbc844b..0ddec3d4b059 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,95 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
71 71
72static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 }; 72static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
73 73
74/* For every mcs_rate line, the first 8 bytes are for stream 1x1,
75 * and all 16 bytes are for stream 2x2.
76 */
77static const u16 mcs_rate[4][16] = {
78 /* LGI 40M */
79 { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
80 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
81
82 /* SGI 40M */
83 { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
84 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
85
86 /* LGI 20M */
87 { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
88 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
89
90 /* SGI 20M */
91 { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
92 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
93};
94
95/* AC rates */
96static const u16 ac_mcs_rate_nss1[8][10] = {
97 /* LG 160M */
98 { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
99 0x492, 0x57C, 0x618 },
100
101 /* SG 160M */
102 { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
103 0x514, 0x618, 0x6C6 },
104
105 /* LG 80M */
106 { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
107 0x249, 0x2BE, 0x30C },
108
109 /* SG 80M */
110 { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
111 0x28A, 0x30C, 0x363 },
112
113 /* LG 40M */
114 { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
115 0x10E, 0x144, 0x168 },
116
117 /* SG 40M */
118 { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
119 0x12C, 0x168, 0x190 },
120
121 /* LG 20M */
122 { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
123
124 /* SG 20M */
125 { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
126};
127
128/* NSS2 note: the value in the table is 2 multiplier of the actual rate */
129static const u16 ac_mcs_rate_nss2[8][10] = {
130 /* LG 160M */
131 { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
132 0x924, 0xAF8, 0xC30 },
133
134 /* SG 160M */
135 { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
136 0xA28, 0xC30, 0xD8B },
137
138 /* LG 80M */
139 { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
140 0x492, 0x57C, 0x618 },
141
142 /* SG 80M */
143 { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
144 0x514, 0x618, 0x6C6 },
145
146 /* LG 40M */
147 { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
148 0x21C, 0x288, 0x2D0 },
149
150 /* SG 40M */
151 { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
152 0x258, 0x2D0, 0x320 },
153
154 /* LG 20M */
155 { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
156 0x138, 0x00 },
157
158 /* SG 20M */
159 { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
160 0x15B, 0x00 },
161};
162
74struct region_code_mapping { 163struct region_code_mapping {
75 u8 code; 164 u8 code;
76 u8 region[IEEE80211_COUNTRY_STRING_LEN]; 165 u8 region[IEEE80211_COUNTRY_STRING_LEN];
@@ -109,95 +198,6 @@ u8 *mwifiex_11d_code_2_region(u8 code)
109u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv, 198u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
110 u8 index, u8 ht_info) 199 u8 index, u8 ht_info)
111{ 200{
112 /*
113 * For every mcs_rate line, the first 8 bytes are for stream 1x1,
114 * and all 16 bytes are for stream 2x2.
115 */
116 u16 mcs_rate[4][16] = {
117 /* LGI 40M */
118 { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
119 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
120
121 /* SGI 40M */
122 { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
123 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
124
125 /* LGI 20M */
126 { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
127 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
128
129 /* SGI 20M */
130 { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
131 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
132 };
133 /* AC rates */
134 u16 ac_mcs_rate_nss1[8][10] = {
135 /* LG 160M */
136 { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
137 0x492, 0x57C, 0x618 },
138
139 /* SG 160M */
140 { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
141 0x514, 0x618, 0x6C6 },
142
143 /* LG 80M */
144 { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
145 0x249, 0x2BE, 0x30C },
146
147 /* SG 80M */
148 { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
149 0x28A, 0x30C, 0x363 },
150
151 /* LG 40M */
152 { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
153 0x10E, 0x144, 0x168 },
154
155 /* SG 40M */
156 { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
157 0x12C, 0x168, 0x190 },
158
159 /* LG 20M */
160 { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
161
162 /* SG 20M */
163 { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
164 };
165 /* NSS2 note: the value in the table is 2 multiplier of the actual
166 * rate
167 */
168 u16 ac_mcs_rate_nss2[8][10] = {
169 /* LG 160M */
170 { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
171 0x924, 0xAF8, 0xC30 },
172
173 /* SG 160M */
174 { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
175 0xA28, 0xC30, 0xD8B },
176
177 /* LG 80M */
178 { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
179 0x492, 0x57C, 0x618 },
180
181 /* SG 80M */
182 { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
183 0x514, 0x618, 0x6C6 },
184
185 /* LG 40M */
186 { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
187 0x21C, 0x288, 0x2D0 },
188
189 /* SG 40M */
190 { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
191 0x258, 0x2D0, 0x320 },
192
193 /* LG 20M */
194 { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
195 0x138, 0x00 },
196
197 /* SG 20M */
198 { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
199 0x15B, 0x00 },
200 };
201 u32 rate = 0; 201 u32 rate = 0;
202 u8 mcs_index = 0; 202 u8 mcs_index = 0;
203 u8 bw = 0; 203 u8 bw = 0;
@@ -252,28 +252,8 @@ u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
252u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, 252u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
253 u8 index, u8 ht_info) 253 u8 index, u8 ht_info)
254{ 254{
255 /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
256 * and all 16 bytes are for stream 2x2.
257 */
258 u16 mcs_rate[4][16] = {
259 /* LGI 40M */
260 { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
261 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
262
263 /* SGI 40M */
264 { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
265 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
266
267 /* LGI 20M */
268 { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
269 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
270
271 /* SGI 20M */
272 { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
273 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
274 };
275 u32 mcs_num_supp = 255 u32 mcs_num_supp =
276 (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8; 256 (priv->adapter->user_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
277 u32 rate; 257 u32 rate;
278 258
279 if (priv->adapter->is_hw_11ac_capable) 259 if (priv->adapter->is_hw_11ac_capable)
@@ -458,7 +438,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
458 break; 438 break;
459 case BAND_G: 439 case BAND_G:
460 case BAND_G | BAND_GN: 440 case BAND_G | BAND_GN:
461 case BAND_G | BAND_GN | BAND_GAC:
462 dev_dbg(adapter->dev, "info: infra band=%d " 441 dev_dbg(adapter->dev, "info: infra band=%d "
463 "supported_rates_g\n", adapter->config_bands); 442 "supported_rates_g\n", adapter->config_bands);
464 k = mwifiex_copy_rates(rates, k, supported_rates_g, 443 k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -469,10 +448,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
469 case BAND_A | BAND_B: 448 case BAND_A | BAND_B:
470 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN: 449 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
471 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC: 450 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
472 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
473 BAND_AAC | BAND_GAC:
474 case BAND_B | BAND_G | BAND_GN: 451 case BAND_B | BAND_G | BAND_GN:
475 case BAND_B | BAND_G | BAND_GN | BAND_GAC:
476 dev_dbg(adapter->dev, "info: infra band=%d " 452 dev_dbg(adapter->dev, "info: infra band=%d "
477 "supported_rates_bg\n", adapter->config_bands); 453 "supported_rates_bg\n", adapter->config_bands);
478 k = mwifiex_copy_rates(rates, k, supported_rates_bg, 454 k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -496,7 +472,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
496 sizeof(supported_rates_a)); 472 sizeof(supported_rates_a));
497 break; 473 break;
498 case BAND_GN: 474 case BAND_GN:
499 case BAND_GN | BAND_GAC:
500 dev_dbg(adapter->dev, "info: infra band=%d " 475 dev_dbg(adapter->dev, "info: infra band=%d "
501 "supported_rates_n\n", adapter->config_bands); 476 "supported_rates_n\n", adapter->config_bands);
502 k = mwifiex_copy_rates(rates, k, supported_rates_n, 477 k = mwifiex_copy_rates(rates, k, supported_rates_n,
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1ddc8b2e3722..1062c918a7bf 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -37,13 +37,12 @@
37static void 37static void
38mwifiex_init_cmd_node(struct mwifiex_private *priv, 38mwifiex_init_cmd_node(struct mwifiex_private *priv,
39 struct cmd_ctrl_node *cmd_node, 39 struct cmd_ctrl_node *cmd_node,
40 u32 cmd_oid, void *data_buf) 40 u32 cmd_oid, void *data_buf, bool sync)
41{ 41{
42 cmd_node->priv = priv; 42 cmd_node->priv = priv;
43 cmd_node->cmd_oid = cmd_oid; 43 cmd_node->cmd_oid = cmd_oid;
44 if (priv->adapter->cmd_wait_q_required) { 44 if (sync) {
45 cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required; 45 cmd_node->wait_q_enabled = true;
46 priv->adapter->cmd_wait_q_required = false;
47 cmd_node->cmd_wait_q_woken = false; 46 cmd_node->cmd_wait_q_woken = false;
48 cmd_node->condition = &cmd_node->cmd_wait_q_woken; 47 cmd_node->condition = &cmd_node->cmd_wait_q_woken;
49 } 48 }
@@ -166,8 +165,10 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
166 dev_err(adapter->dev, 165 dev_err(adapter->dev,
167 "DNLD_CMD: FW in reset state, ignore cmd %#x\n", 166 "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
168 cmd_code); 167 cmd_code);
169 mwifiex_complete_cmd(adapter, cmd_node); 168 if (cmd_node->wait_q_enabled)
169 mwifiex_complete_cmd(adapter, cmd_node);
170 mwifiex_recycle_cmd_node(adapter, cmd_node); 170 mwifiex_recycle_cmd_node(adapter, cmd_node);
171 queue_work(adapter->workqueue, &adapter->main_work);
171 return -1; 172 return -1;
172 } 173 }
173 174
@@ -276,11 +277,11 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
276 277
277 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 278 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
278 279
280 adapter->seq_num++;
279 sleep_cfm_buf->seq_num = 281 sleep_cfm_buf->seq_num =
280 cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO 282 cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
281 (adapter->seq_num, priv->bss_num, 283 (adapter->seq_num, priv->bss_num,
282 priv->bss_type))); 284 priv->bss_type)));
283 adapter->seq_num++;
284 285
285 if (adapter->iface_type == MWIFIEX_USB) { 286 if (adapter->iface_type == MWIFIEX_USB) {
286 sleep_cfm_tmp = 287 sleep_cfm_tmp =
@@ -480,28 +481,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
480} 481}
481 482
482/* 483/*
483 * This function is used to send synchronous command to the firmware. 484 * This function prepares a command and send it to the firmware.
484 *
485 * it allocates a wait queue for the command and wait for the command
486 * response.
487 */
488int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
489 u16 cmd_action, u32 cmd_oid, void *data_buf)
490{
491 int ret = 0;
492 struct mwifiex_adapter *adapter = priv->adapter;
493
494 adapter->cmd_wait_q_required = true;
495
496 ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
497 data_buf);
498
499 return ret;
500}
501
502
503/*
504 * This function prepares a command and asynchronously send it to the firmware.
505 * 485 *
506 * Preparation includes - 486 * Preparation includes -
507 * - Sanity tests to make sure the card is still present or the FW 487 * - Sanity tests to make sure the card is still present or the FW
@@ -511,8 +491,8 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
511 * - Fill up the non-default parameters and buffer pointers 491 * - Fill up the non-default parameters and buffer pointers
512 * - Add the command to pending queue 492 * - Add the command to pending queue
513 */ 493 */
514int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, 494int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
515 u16 cmd_action, u32 cmd_oid, void *data_buf) 495 u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync)
516{ 496{
517 int ret; 497 int ret;
518 struct mwifiex_adapter *adapter = priv->adapter; 498 struct mwifiex_adapter *adapter = priv->adapter;
@@ -529,11 +509,21 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
529 return -1; 509 return -1;
530 } 510 }
531 511
512 if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
513 dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
514 return -1;
515 }
516
532 if (adapter->surprise_removed) { 517 if (adapter->surprise_removed) {
533 dev_err(adapter->dev, "PREP_CMD: card is removed\n"); 518 dev_err(adapter->dev, "PREP_CMD: card is removed\n");
534 return -1; 519 return -1;
535 } 520 }
536 521
522 if (adapter->is_cmd_timedout) {
523 dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
524 return -1;
525 }
526
537 if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) { 527 if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
538 if (cmd_no != HostCmd_CMD_FUNC_INIT) { 528 if (cmd_no != HostCmd_CMD_FUNC_INIT) {
539 dev_err(adapter->dev, "PREP_CMD: FW in reset state\n"); 529 dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
@@ -550,7 +540,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
550 } 540 }
551 541
552 /* Initialize the command node */ 542 /* Initialize the command node */
553 mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf); 543 mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
554 544
555 if (!cmd_node->cmd_skb) { 545 if (!cmd_node->cmd_skb) {
556 dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n"); 546 dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
@@ -595,7 +585,8 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
595 } 585 }
596 586
597 /* Send command */ 587 /* Send command */
598 if (cmd_no == HostCmd_CMD_802_11_SCAN) { 588 if (cmd_no == HostCmd_CMD_802_11_SCAN ||
589 cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
599 mwifiex_queue_scan_cmd(priv, cmd_node); 590 mwifiex_queue_scan_cmd(priv, cmd_node);
600 } else { 591 } else {
601 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); 592 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
@@ -785,7 +776,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
785 unsigned long flags; 776 unsigned long flags;
786 777
787 /* Now we got response from FW, cancel the command timer */ 778 /* Now we got response from FW, cancel the command timer */
788 del_timer(&adapter->cmd_timer); 779 del_timer_sync(&adapter->cmd_timer);
789 780
790 if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) { 781 if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
791 resp = (struct host_cmd_ds_command *) adapter->upld_buf; 782 resp = (struct host_cmd_ds_command *) adapter->upld_buf;
@@ -794,7 +785,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
794 return -1; 785 return -1;
795 } 786 }
796 787
797 adapter->num_cmd_timeout = 0; 788 adapter->is_cmd_timedout = 0;
798 789
799 resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; 790 resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
800 if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) { 791 if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
@@ -905,8 +896,7 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
905 struct cmd_ctrl_node *cmd_node; 896 struct cmd_ctrl_node *cmd_node;
906 struct timeval tstamp; 897 struct timeval tstamp;
907 898
908 adapter->num_cmd_timeout++; 899 adapter->is_cmd_timedout = 1;
909 adapter->dbg.num_cmd_timeout++;
910 if (!adapter->curr_cmd) { 900 if (!adapter->curr_cmd) {
911 dev_dbg(adapter->dev, "cmd: empty curr_cmd\n"); 901 dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
912 return; 902 return;
@@ -929,8 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
929 dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n", 919 dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
930 adapter->dbg.num_cmd_host_to_card_failure); 920 adapter->dbg.num_cmd_host_to_card_failure);
931 921
932 dev_err(adapter->dev, "num_cmd_timeout = %d\n", 922 dev_err(adapter->dev, "is_cmd_timedout = %d\n",
933 adapter->dbg.num_cmd_timeout); 923 adapter->is_cmd_timedout);
934 dev_err(adapter->dev, "num_tx_timeout = %d\n", 924 dev_err(adapter->dev, "num_tx_timeout = %d\n",
935 adapter->dbg.num_tx_timeout); 925 adapter->dbg.num_tx_timeout);
936 926
@@ -987,13 +977,14 @@ void
987mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) 977mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
988{ 978{
989 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node; 979 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
990 unsigned long flags; 980 unsigned long flags, cmd_flags;
981 struct mwifiex_private *priv;
982 int i;
991 983
984 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
992 /* Cancel current cmd */ 985 /* Cancel current cmd */
993 if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) { 986 if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
994 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
995 adapter->curr_cmd->wait_q_enabled = false; 987 adapter->curr_cmd->wait_q_enabled = false;
996 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
997 adapter->cmd_wait_q.status = -1; 988 adapter->cmd_wait_q.status = -1;
998 mwifiex_complete_cmd(adapter, adapter->curr_cmd); 989 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
999 } 990 }
@@ -1013,6 +1004,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
1013 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); 1004 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
1014 } 1005 }
1015 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); 1006 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
1007 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
1016 1008
1017 /* Cancel all pending scan command */ 1009 /* Cancel all pending scan command */
1018 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 1010 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
@@ -1027,9 +1019,21 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
1027 } 1019 }
1028 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); 1020 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
1029 1021
1030 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 1022 if (adapter->scan_processing) {
1031 adapter->scan_processing = false; 1023 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
1032 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 1024 adapter->scan_processing = false;
1025 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
1026 for (i = 0; i < adapter->priv_num; i++) {
1027 priv = adapter->priv[i];
1028 if (!priv)
1029 continue;
1030 if (priv->scan_request) {
1031 dev_dbg(adapter->dev, "info: aborting scan\n");
1032 cfg80211_scan_done(priv->scan_request, 1);
1033 priv->scan_request = NULL;
1034 }
1035 }
1036 }
1033} 1037}
1034 1038
1035/* 1039/*
@@ -1048,7 +1052,8 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
1048 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; 1052 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
1049 unsigned long cmd_flags; 1053 unsigned long cmd_flags;
1050 unsigned long scan_pending_q_flags; 1054 unsigned long scan_pending_q_flags;
1051 bool cancel_scan_cmd = false; 1055 struct mwifiex_private *priv;
1056 int i;
1052 1057
1053 if ((adapter->curr_cmd) && 1058 if ((adapter->curr_cmd) &&
1054 (adapter->curr_cmd->wait_q_enabled)) { 1059 (adapter->curr_cmd->wait_q_enabled)) {
@@ -1074,15 +1079,24 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
1074 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 1079 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
1075 spin_lock_irqsave(&adapter->scan_pending_q_lock, 1080 spin_lock_irqsave(&adapter->scan_pending_q_lock,
1076 scan_pending_q_flags); 1081 scan_pending_q_flags);
1077 cancel_scan_cmd = true;
1078 } 1082 }
1079 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1083 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1080 scan_pending_q_flags); 1084 scan_pending_q_flags);
1081 1085
1082 if (cancel_scan_cmd) { 1086 if (adapter->scan_processing) {
1083 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); 1087 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
1084 adapter->scan_processing = false; 1088 adapter->scan_processing = false;
1085 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); 1089 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
1090 for (i = 0; i < adapter->priv_num; i++) {
1091 priv = adapter->priv[i];
1092 if (!priv)
1093 continue;
1094 if (priv->scan_request) {
1095 dev_dbg(adapter->dev, "info: aborting scan\n");
1096 cfg80211_scan_done(priv->scan_request, 1);
1097 priv->scan_request = NULL;
1098 }
1099 }
1086 } 1100 }
1087 adapter->cmd_wait_q.status = -1; 1101 adapter->cmd_wait_q.status = -1;
1088} 1102}
@@ -1454,7 +1468,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1454{ 1468{
1455 struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec; 1469 struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
1456 struct mwifiex_adapter *adapter = priv->adapter; 1470 struct mwifiex_adapter *adapter = priv->adapter;
1457 int i; 1471 struct mwifiex_ie_types_header *tlv;
1472 struct hw_spec_fw_api_rev *api_rev;
1473 u16 resp_size, api_id;
1474 int i, left_len, parsed_len = 0;
1458 1475
1459 adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info); 1476 adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
1460 1477
@@ -1490,6 +1507,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1490 } 1507 }
1491 1508
1492 adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number); 1509 adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
1510 adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff;
1493 adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna); 1511 adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
1494 1512
1495 if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) { 1513 if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
@@ -1498,8 +1516,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1498 /* Copy 11AC cap */ 1516 /* Copy 11AC cap */
1499 adapter->hw_dot_11ac_dev_cap = 1517 adapter->hw_dot_11ac_dev_cap =
1500 le32_to_cpu(hw_spec->dot_11ac_dev_cap); 1518 le32_to_cpu(hw_spec->dot_11ac_dev_cap);
1501 adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap; 1519 adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
1502 adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap; 1520 & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
1521 adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
1522 & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
1503 1523
1504 /* Copy 11AC mcs */ 1524 /* Copy 11AC mcs */
1505 adapter->hw_dot_11ac_mcs_support = 1525 adapter->hw_dot_11ac_mcs_support =
@@ -1510,6 +1530,46 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1510 adapter->is_hw_11ac_capable = false; 1530 adapter->is_hw_11ac_capable = false;
1511 } 1531 }
1512 1532
1533 resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
1534 if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
1535 /* we have variable HW SPEC information */
1536 left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
1537 while (left_len > sizeof(struct mwifiex_ie_types_header)) {
1538 tlv = (void *)&hw_spec->tlvs + parsed_len;
1539 switch (le16_to_cpu(tlv->type)) {
1540 case TLV_TYPE_FW_API_REV:
1541 api_rev = (struct hw_spec_fw_api_rev *)tlv;
1542 api_id = le16_to_cpu(api_rev->api_id);
1543 switch (api_id) {
1544 case KEY_API_VER_ID:
1545 adapter->fw_key_api_major_ver =
1546 api_rev->major_ver;
1547 adapter->fw_key_api_minor_ver =
1548 api_rev->minor_ver;
1549 dev_dbg(adapter->dev,
1550 "fw_key_api v%d.%d\n",
1551 adapter->fw_key_api_major_ver,
1552 adapter->fw_key_api_minor_ver);
1553 break;
1554 default:
1555 dev_warn(adapter->dev,
1556 "Unknown FW api_id: %d\n",
1557 api_id);
1558 break;
1559 }
1560 break;
1561 default:
1562 dev_warn(adapter->dev,
1563 "Unknown GET_HW_SPEC TLV type: %#x\n",
1564 le16_to_cpu(tlv->type));
1565 break;
1566 }
1567 parsed_len += le16_to_cpu(tlv->len) +
1568 sizeof(struct mwifiex_ie_types_header);
1569 left_len -= parsed_len;
1570 }
1571 }
1572
1513 dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n", 1573 dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
1514 adapter->fw_release_number); 1574 adapter->fw_release_number);
1515 dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n", 1575 dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
@@ -1538,6 +1598,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1538 1598
1539 adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap); 1599 adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
1540 adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support; 1600 adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
1601 adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support;
1541 1602
1542 if (adapter->if_ops.update_mp_end_port) 1603 if (adapter->if_ops.update_mp_end_port)
1543 adapter->if_ops.update_mp_end_port(adapter, 1604 adapter->if_ops.update_mp_end_port(adapter,
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index a5f9875cfd6e..b8a49aad12fd 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -85,8 +85,8 @@ static struct mwifiex_debug_data items[] = {
85 item_addr(hs_activated), 1}, 85 item_addr(hs_activated), 1},
86 {"num_tx_timeout", item_size(num_tx_timeout), 86 {"num_tx_timeout", item_size(num_tx_timeout),
87 item_addr(num_tx_timeout), 1}, 87 item_addr(num_tx_timeout), 1},
88 {"num_cmd_timeout", item_size(num_cmd_timeout), 88 {"is_cmd_timedout", item_size(is_cmd_timedout),
89 item_addr(num_cmd_timeout), 1}, 89 item_addr(is_cmd_timedout), 1},
90 {"timeout_cmd_id", item_size(timeout_cmd_id), 90 {"timeout_cmd_id", item_size(timeout_cmd_id),
91 item_addr(timeout_cmd_id), 1}, 91 item_addr(timeout_cmd_id), 1},
92 {"timeout_cmd_act", item_size(timeout_cmd_act), 92 {"timeout_cmd_act", item_size(timeout_cmd_act),
@@ -493,7 +493,7 @@ mwifiex_regrdwr_write(struct file *file,
493{ 493{
494 unsigned long addr = get_zeroed_page(GFP_KERNEL); 494 unsigned long addr = get_zeroed_page(GFP_KERNEL);
495 char *buf = (char *) addr; 495 char *buf = (char *) addr;
496 size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1)); 496 size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
497 int ret; 497 int ret;
498 u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX; 498 u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX;
499 499
@@ -594,7 +594,7 @@ mwifiex_rdeeprom_write(struct file *file,
594{ 594{
595 unsigned long addr = get_zeroed_page(GFP_KERNEL); 595 unsigned long addr = get_zeroed_page(GFP_KERNEL);
596 char *buf = (char *) addr; 596 char *buf = (char *) addr;
597 size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1)); 597 size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
598 int ret = 0; 598 int ret = 0;
599 int offset = -1, bytes = -1; 599 int offset = -1, bytes = -1;
600 600
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 3a21bd03d6db..e7b3e16e5d34 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -75,10 +75,16 @@
75 75
76#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0) 76#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
77#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1) 77#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
78#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
78 79
79#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024 80#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
80#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128 81#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
81 82
83#define MWIFIEX_TDLS_DISABLE_LINK 0x00
84#define MWIFIEX_TDLS_ENABLE_LINK 0x01
85#define MWIFIEX_TDLS_CREATE_LINK 0x02
86#define MWIFIEX_TDLS_CONFIG_LINK 0x03
87
82enum mwifiex_bss_type { 88enum mwifiex_bss_type {
83 MWIFIEX_BSS_TYPE_STA = 0, 89 MWIFIEX_BSS_TYPE_STA = 0,
84 MWIFIEX_BSS_TYPE_UAP = 1, 90 MWIFIEX_BSS_TYPE_UAP = 1,
@@ -92,6 +98,23 @@ enum mwifiex_bss_role {
92 MWIFIEX_BSS_ROLE_ANY = 0xff, 98 MWIFIEX_BSS_ROLE_ANY = 0xff,
93}; 99};
94 100
101enum mwifiex_tdls_status {
102 TDLS_NOT_SETUP = 0,
103 TDLS_SETUP_INPROGRESS,
104 TDLS_SETUP_COMPLETE,
105 TDLS_SETUP_FAILURE,
106 TDLS_LINK_TEARDOWN,
107};
108
109enum mwifiex_tdls_error_code {
110 TDLS_ERR_NO_ERROR = 0,
111 TDLS_ERR_INTERNAL_ERROR,
112 TDLS_ERR_MAX_LINKS_EST,
113 TDLS_ERR_LINK_EXISTS,
114 TDLS_ERR_LINK_NONEXISTENT,
115 TDLS_ERR_PEER_STA_UNREACHABLE = 25,
116};
117
95#define BSS_ROLE_BIT_MASK BIT(0) 118#define BSS_ROLE_BIT_MASK BIT(0)
96 119
97#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK) 120#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5fa932d5f905..b485dc1ae5eb 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -50,21 +50,23 @@ struct tx_packet_hdr {
50#define HOSTCMD_SUPPORTED_RATES 14 50#define HOSTCMD_SUPPORTED_RATES 14
51#define N_SUPPORTED_RATES 3 51#define N_SUPPORTED_RATES 3
52#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \ 52#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
53 BAND_AN | BAND_GAC | BAND_AAC) 53 BAND_AN | BAND_AAC)
54 54
55#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \ 55#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
56 BIT(12) | BIT(13)) 56 BIT(13))
57#define IS_SUPPORT_MULTI_BANDS(adapter) \ 57#define IS_SUPPORT_MULTI_BANDS(adapter) \
58 (adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT) 58 (adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
59 59
60/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14 60/* bit 13: 11ac BAND_AAC
61 * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit 61 * bit 12: reserved for lab testing, will be reused for BAND_AN
62 * bit 14 for AAC, in order to be compatible with the band capability 62 * bit 11: 11n BAND_GN
63 * defined in the driver after right shift of 8 bits. 63 * bit 10: 11a BAND_A
64 * bit 9: 11g BAND_G
65 * bit 8: 11b BAND_B
66 * Map these bits to band capability by right shifting 8 bits.
64 */ 67 */
65#define GET_FW_DEFAULT_BANDS(adapter) \ 68#define GET_FW_DEFAULT_BANDS(adapter) \
66 (((((adapter->fw_cap_info & 0x3000) << 1) | \ 69 (((adapter->fw_cap_info & 0x2f00) >> 8) & \
67 (adapter->fw_cap_info & ~0xF000)) >> 8) & \
68 ALL_802_11_BANDS) 70 ALL_802_11_BANDS)
69 71
70#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff 72#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
@@ -77,12 +79,21 @@ enum KEY_TYPE_ID {
77 KEY_TYPE_ID_WAPI, 79 KEY_TYPE_ID_WAPI,
78 KEY_TYPE_ID_AES_CMAC, 80 KEY_TYPE_ID_AES_CMAC,
79}; 81};
82
83#define WPA_PN_SIZE 8
84#define KEY_PARAMS_FIXED_LEN 10
85#define KEY_INDEX_MASK 0xf
86#define FW_KEY_API_VER_MAJOR_V2 2
87
80#define KEY_MCAST BIT(0) 88#define KEY_MCAST BIT(0)
81#define KEY_UNICAST BIT(1) 89#define KEY_UNICAST BIT(1)
82#define KEY_ENABLED BIT(2) 90#define KEY_ENABLED BIT(2)
91#define KEY_DEFAULT BIT(3)
92#define KEY_TX_KEY BIT(4)
93#define KEY_RX_KEY BIT(5)
83#define KEY_IGTK BIT(10) 94#define KEY_IGTK BIT(10)
84 95
85#define WAPI_KEY_LEN 50 96#define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
86 97
87#define MAX_POLL_TRIES 100 98#define MAX_POLL_TRIES 100
88#define MAX_FIRMWARE_POLL_TRIES 100 99#define MAX_FIRMWARE_POLL_TRIES 100
@@ -130,6 +141,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
130#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) 141#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
131#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) 142#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
132#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32) 143#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
144#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
133#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 145#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
134#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) 146#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
135#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 147#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
@@ -144,6 +156,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
144#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82) 156#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
145#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83) 157#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
146#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84) 158#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
159#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
160#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
147#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93) 161#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
148#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94) 162#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
149#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104) 163#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -154,6 +168,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
154#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145) 168#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
155#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) 169#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
156#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) 170#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
171#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
172#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
157 173
158#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 174#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
159 175
@@ -176,13 +192,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
176#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192 192#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
177 193
178#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11)) 194#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
195#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
179 196
180#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ 197#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
181 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \ 198 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
182 IEEE80211_HT_CAP_SM_PS) 199 IEEE80211_HT_CAP_SM_PS)
183 200
201#define MWIFIEX_DEF_11N_TX_BF_CAP 0x09E1E008
202
184#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR 203#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
185 204
205#define GET_RXSTBC(x) (x & IEEE80211_HT_CAP_RX_STBC)
206#define MWIFIEX_RX_STBC1 0x0100
207#define MWIFIEX_RX_STBC12 0x0200
208#define MWIFIEX_RX_STBC123 0x0300
209
186/* dev_cap bitmap 210/* dev_cap bitmap
187 * BIT 211 * BIT
188 * 0-16 reserved 212 * 0-16 reserved
@@ -204,6 +228,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
204#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29)) 228#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
205#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8)) 229#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
206#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22)) 230#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
231#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
207 232
208/* httxcfg bitmap 233/* httxcfg bitmap
209 * 0 reserved 234 * 0 reserved
@@ -216,8 +241,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
216 */ 241 */
217#define MWIFIEX_FW_DEF_HTTXCFG (BIT(1) | BIT(4) | BIT(5) | BIT(6)) 242#define MWIFIEX_FW_DEF_HTTXCFG (BIT(1) | BIT(4) | BIT(5) | BIT(6))
218 243
244/* 11AC Tx and Rx MCS map for 1x1 mode:
245 * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1
246 * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 7 streams
247 */
248#define MWIFIEX_11AC_MCS_MAP_1X1 0xfffefffe
249
250/* 11AC Tx and Rx MCS map for 2x2 mode:
251 * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1 and 2
252 * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 6 streams
253 */
254#define MWIFIEX_11AC_MCS_MAP_2X2 0xfffafffa
255
219#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f) 256#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
220#define SETHT_MCS32(x) (x[4] |= 1) 257#define SETHT_MCS32(x) (x[4] |= 1)
258#define HT_STREAM_1X1 0x11
221#define HT_STREAM_2X2 0x22 259#define HT_STREAM_2X2 0x22
222 260
223#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4)) 261#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
@@ -226,17 +264,24 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
226 264
227/* HW_SPEC fw_cap_info */ 265/* HW_SPEC fw_cap_info */
228 266
229#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13))) 267#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & BIT(13))
230 268
231#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3) 269#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
232#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3) 270#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
233#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \ 271#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
234 (2 * (nss - 1))) 272 (2 * (nss - 1)))
235#define NO_NSS_SUPPORT 0x3
236
237#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16) 273#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
238#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF) 274#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
239 275
276/* Clear SU Beanformer, MU beanformer, MU beanformee and
277 * sounding dimensions bits
278 */
279#define MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK \
280 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | \
281 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | \
282 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | \
283 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
284
240#define MOD_CLASS_HR_DSSS 0x03 285#define MOD_CLASS_HR_DSSS 0x03
241#define MOD_CLASS_OFDM 0x07 286#define MOD_CLASS_OFDM 0x07
242#define MOD_CLASS_HT 0x08 287#define MOD_CLASS_HT 0x08
@@ -295,10 +340,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
295#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed 340#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
296#define HostCmd_CMD_SET_BSS_MODE 0x00f7 341#define HostCmd_CMD_SET_BSS_MODE 0x00f7
297#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa 342#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
343#define HostCmd_CMD_802_11_SCAN_EXT 0x0107
298#define HostCmd_CMD_COALESCE_CFG 0x010a 344#define HostCmd_CMD_COALESCE_CFG 0x010a
299#define HostCmd_CMD_MGMT_FRAME_REG 0x010c 345#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
300#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d 346#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
301#define HostCmd_CMD_11AC_CFG 0x0112 347#define HostCmd_CMD_11AC_CFG 0x0112
348#define HostCmd_CMD_TDLS_OPER 0x0122
302 349
303#define PROTOCOL_NO_SECURITY 0x01 350#define PROTOCOL_NO_SECURITY 0x01
304#define PROTOCOL_STATIC_WEP 0x02 351#define PROTOCOL_STATIC_WEP 0x02
@@ -440,6 +487,7 @@ enum P2P_MODES {
440#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c 487#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
441#define EVENT_HOSTWAKE_STAIE 0x0000004d 488#define EVENT_HOSTWAKE_STAIE 0x0000004d
442#define EVENT_CHANNEL_SWITCH_ANN 0x00000050 489#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
490#define EVENT_EXT_SCAN_REPORT 0x00000058
443#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f 491#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
444 492
445#define EVENT_ID_MASK 0xffff 493#define EVENT_ID_MASK 0xffff
@@ -468,6 +516,12 @@ enum P2P_MODES {
468#define MWIFIEX_CRITERIA_UNICAST BIT(1) 516#define MWIFIEX_CRITERIA_UNICAST BIT(1)
469#define MWIFIEX_CRITERIA_MULTICAST BIT(3) 517#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
470 518
519#define ACT_TDLS_DELETE 0x00
520#define ACT_TDLS_CREATE 0x01
521#define ACT_TDLS_CONFIG 0x02
522
523#define MWIFIEX_FW_V15 15
524
471struct mwifiex_ie_types_header { 525struct mwifiex_ie_types_header {
472 __le16 type; 526 __le16 type;
473 __le16 len; 527 __le16 len;
@@ -480,6 +534,7 @@ struct mwifiex_ie_types_data {
480 534
481#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01 535#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
482#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08 536#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
537#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
483 538
484struct txpd { 539struct txpd {
485 u8 bss_type; 540 u8 bss_type;
@@ -676,6 +731,56 @@ struct mwifiex_cmac_param {
676 u8 key[WLAN_KEY_LEN_AES_CMAC]; 731 u8 key[WLAN_KEY_LEN_AES_CMAC];
677} __packed; 732} __packed;
678 733
734struct mwifiex_wep_param {
735 __le16 key_len;
736 u8 key[WLAN_KEY_LEN_WEP104];
737} __packed;
738
739struct mwifiex_tkip_param {
740 u8 pn[WPA_PN_SIZE];
741 __le16 key_len;
742 u8 key[WLAN_KEY_LEN_TKIP];
743} __packed;
744
745struct mwifiex_aes_param {
746 u8 pn[WPA_PN_SIZE];
747 __le16 key_len;
748 u8 key[WLAN_KEY_LEN_CCMP];
749} __packed;
750
751struct mwifiex_wapi_param {
752 u8 pn[PN_LEN];
753 __le16 key_len;
754 u8 key[WLAN_KEY_LEN_SMS4];
755} __packed;
756
757struct mwifiex_cmac_aes_param {
758 u8 ipn[IGTK_PN_LEN];
759 __le16 key_len;
760 u8 key[WLAN_KEY_LEN_AES_CMAC];
761} __packed;
762
763struct mwifiex_ie_type_key_param_set_v2 {
764 __le16 type;
765 __le16 len;
766 u8 mac_addr[ETH_ALEN];
767 u8 key_idx;
768 u8 key_type;
769 __le16 key_info;
770 union {
771 struct mwifiex_wep_param wep;
772 struct mwifiex_tkip_param tkip;
773 struct mwifiex_aes_param aes;
774 struct mwifiex_wapi_param wapi;
775 struct mwifiex_cmac_aes_param cmac_aes;
776 } key_params;
777} __packed;
778
779struct host_cmd_ds_802_11_key_material_v2 {
780 __le16 action;
781 struct mwifiex_ie_type_key_param_set_v2 key_param_set;
782} __packed;
783
679struct host_cmd_ds_802_11_key_material { 784struct host_cmd_ds_802_11_key_material {
680 __le16 action; 785 __le16 action;
681 struct mwifiex_ie_type_key_param_set key_param_set; 786 struct mwifiex_ie_type_key_param_set key_param_set;
@@ -727,6 +832,17 @@ struct host_cmd_ds_802_11_ps_mode_enh {
727 } params; 832 } params;
728} __packed; 833} __packed;
729 834
835enum FW_API_VER_ID {
836 KEY_API_VER_ID = 1,
837};
838
839struct hw_spec_fw_api_rev {
840 struct mwifiex_ie_types_header header;
841 __le16 api_id;
842 u8 major_ver;
843 u8 minor_ver;
844} __packed;
845
730struct host_cmd_ds_get_hw_spec { 846struct host_cmd_ds_get_hw_spec {
731 __le16 hw_if_version; 847 __le16 hw_if_version;
732 __le16 version; 848 __le16 version;
@@ -748,6 +864,7 @@ struct host_cmd_ds_get_hw_spec {
748 __le32 reserved_6; 864 __le32 reserved_6;
749 __le32 dot_11ac_dev_cap; 865 __le32 dot_11ac_dev_cap;
750 __le32 dot_11ac_mcs_support; 866 __le32 dot_11ac_mcs_support;
867 u8 tlvs[0];
751} __packed; 868} __packed;
752 869
753struct host_cmd_ds_802_11_rssi_info { 870struct host_cmd_ds_802_11_rssi_info {
@@ -993,6 +1110,7 @@ struct mwifiex_rate_scope {
993 __le16 hr_dsss_rate_bitmap; 1110 __le16 hr_dsss_rate_bitmap;
994 __le16 ofdm_rate_bitmap; 1111 __le16 ofdm_rate_bitmap;
995 __le16 ht_mcs_rate_bitmap[8]; 1112 __le16 ht_mcs_rate_bitmap[8];
1113 __le16 vht_mcs_rate_bitmap[8];
996} __packed; 1114} __packed;
997 1115
998struct mwifiex_rate_drop_pattern { 1116struct mwifiex_rate_drop_pattern {
@@ -1047,14 +1165,28 @@ struct host_cmd_ds_rf_ant_siso {
1047 __le16 ant_mode; 1165 __le16 ant_mode;
1048}; 1166};
1049 1167
1050struct mwifiex_bcn_param { 1168struct host_cmd_ds_tdls_oper {
1051 u8 bssid[ETH_ALEN]; 1169 __le16 tdls_action;
1052 u8 rssi; 1170 __le16 reason;
1171 u8 peer_mac[ETH_ALEN];
1172} __packed;
1173
1174struct mwifiex_fixed_bcn_param {
1053 __le64 timestamp; 1175 __le64 timestamp;
1054 __le16 beacon_period; 1176 __le16 beacon_period;
1055 __le16 cap_info_bitmap; 1177 __le16 cap_info_bitmap;
1056} __packed; 1178} __packed;
1057 1179
1180struct mwifiex_event_scan_result {
1181 __le16 event_id;
1182 u8 bss_index;
1183 u8 bss_type;
1184 u8 more_event;
1185 u8 reserved[3];
1186 __le16 buf_size;
1187 u8 num_of_set;
1188} __packed;
1189
1058#define MWIFIEX_USER_SCAN_CHAN_MAX 50 1190#define MWIFIEX_USER_SCAN_CHAN_MAX 50
1059 1191
1060#define MWIFIEX_MAX_SSID_LIST_LENGTH 10 1192#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
@@ -1124,6 +1256,28 @@ struct host_cmd_ds_802_11_scan_rsp {
1124 u8 bss_desc_and_tlv_buffer[1]; 1256 u8 bss_desc_and_tlv_buffer[1];
1125} __packed; 1257} __packed;
1126 1258
1259struct host_cmd_ds_802_11_scan_ext {
1260 u32 reserved;
1261 u8 tlv_buffer[1];
1262} __packed;
1263
1264struct mwifiex_ie_types_bss_scan_rsp {
1265 struct mwifiex_ie_types_header header;
1266 u8 bssid[ETH_ALEN];
1267 u8 frame_body[1];
1268} __packed;
1269
1270struct mwifiex_ie_types_bss_scan_info {
1271 struct mwifiex_ie_types_header header;
1272 __le16 rssi;
1273 __le16 anpi;
1274 u8 cca_busy_fraction;
1275 u8 radio_type;
1276 u8 channel;
1277 u8 reserved;
1278 __le64 tsf;
1279} __packed;
1280
1127struct host_cmd_ds_802_11_bg_scan_query { 1281struct host_cmd_ds_802_11_bg_scan_query {
1128 u8 flush; 1282 u8 flush;
1129} __packed; 1283} __packed;
@@ -1296,6 +1450,11 @@ struct mwifiex_ie_types_vhtcap {
1296 struct ieee80211_vht_cap vht_cap; 1450 struct ieee80211_vht_cap vht_cap;
1297} __packed; 1451} __packed;
1298 1452
1453struct mwifiex_ie_types_aid {
1454 struct mwifiex_ie_types_header header;
1455 __le16 aid;
1456} __packed;
1457
1299struct mwifiex_ie_types_oper_mode_ntf { 1458struct mwifiex_ie_types_oper_mode_ntf {
1300 struct mwifiex_ie_types_header header; 1459 struct mwifiex_ie_types_header header;
1301 u8 oper_mode; 1460 u8 oper_mode;
@@ -1331,6 +1490,11 @@ struct mwifiex_ie_types_extcap {
1331 u8 ext_capab[0]; 1490 u8 ext_capab[0];
1332} __packed; 1491} __packed;
1333 1492
1493struct mwifiex_ie_types_qos_info {
1494 struct mwifiex_ie_types_header header;
1495 u8 qos_info;
1496} __packed;
1497
1334struct host_cmd_ds_mac_reg_access { 1498struct host_cmd_ds_mac_reg_access {
1335 __le16 action; 1499 __le16 action;
1336 __le16 offset; 1500 __le16 offset;
@@ -1441,6 +1605,11 @@ struct host_cmd_tlv_rates {
1441 u8 rates[0]; 1605 u8 rates[0];
1442} __packed; 1606} __packed;
1443 1607
1608struct mwifiex_ie_types_bssid_list {
1609 struct mwifiex_ie_types_header header;
1610 u8 bssid[ETH_ALEN];
1611} __packed;
1612
1444struct host_cmd_tlv_bcast_ssid { 1613struct host_cmd_tlv_bcast_ssid {
1445 struct mwifiex_ie_types_header header; 1614 struct mwifiex_ie_types_header header;
1446 u8 bcast_ctl; 1615 u8 bcast_ctl;
@@ -1634,6 +1803,7 @@ struct host_cmd_ds_command {
1634 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh; 1803 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
1635 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg; 1804 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
1636 struct host_cmd_ds_802_11_scan scan; 1805 struct host_cmd_ds_802_11_scan scan;
1806 struct host_cmd_ds_802_11_scan_ext ext_scan;
1637 struct host_cmd_ds_802_11_scan_rsp scan_resp; 1807 struct host_cmd_ds_802_11_scan_rsp scan_resp;
1638 struct host_cmd_ds_802_11_bg_scan_query bg_scan_query; 1808 struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
1639 struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp; 1809 struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
@@ -1653,6 +1823,7 @@ struct host_cmd_ds_command {
1653 struct host_cmd_ds_11n_cfg htcfg; 1823 struct host_cmd_ds_11n_cfg htcfg;
1654 struct host_cmd_ds_wmm_get_status get_wmm_status; 1824 struct host_cmd_ds_wmm_get_status get_wmm_status;
1655 struct host_cmd_ds_802_11_key_material key_material; 1825 struct host_cmd_ds_802_11_key_material key_material;
1826 struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
1656 struct host_cmd_ds_version_ext verext; 1827 struct host_cmd_ds_version_ext verext;
1657 struct host_cmd_ds_mgmt_frame_reg reg_mask; 1828 struct host_cmd_ds_mgmt_frame_reg reg_mask;
1658 struct host_cmd_ds_remain_on_chan roc_cfg; 1829 struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1671,6 +1842,7 @@ struct host_cmd_ds_command {
1671 struct host_cmd_ds_sta_deauth sta_deauth; 1842 struct host_cmd_ds_sta_deauth sta_deauth;
1672 struct host_cmd_11ac_vht_cfg vht_cfg; 1843 struct host_cmd_11ac_vht_cfg vht_cfg;
1673 struct host_cmd_ds_coalesce_cfg coalesce_cfg; 1844 struct host_cmd_ds_coalesce_cfg coalesce_cfg;
1845 struct host_cmd_ds_tdls_oper tdls_oper;
1674 } params; 1846 } params;
1675} __packed; 1847} __packed;
1676 1848
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 81ac001ee741..3bf3d58bbc02 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -138,9 +138,9 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
138 } 138 }
139 139
140 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) 140 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
141 return mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG, 141 return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
142 HostCmd_ACT_GEN_SET, 142 HostCmd_ACT_GEN_SET,
143 UAP_CUSTOM_IE_I, ie_list); 143 UAP_CUSTOM_IE_I, ie_list, false);
144 144
145 return 0; 145 return 0;
146} 146}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 1d0a817f2bf0..4ecd0b208ac6 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
137 priv->csa_expire_time = 0; 137 priv->csa_expire_time = 0;
138 priv->del_list_idx = 0; 138 priv->del_list_idx = 0;
139 priv->hs2_enabled = false; 139 priv->hs2_enabled = false;
140 memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
140 141
141 return mwifiex_add_bss_prio_tbl(priv); 142 return mwifiex_add_bss_prio_tbl(priv);
142} 143}
@@ -233,7 +234,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
233 234
234 adapter->pm_wakeup_fw_try = false; 235 adapter->pm_wakeup_fw_try = false;
235 236
236 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
237 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; 237 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
238 238
239 adapter->is_hs_configured = false; 239 adapter->is_hs_configured = false;
@@ -281,6 +281,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
281 adapter->arp_filter_size = 0; 281 adapter->arp_filter_size = 0;
282 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; 282 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
283 adapter->empty_tx_q_cnt = 0; 283 adapter->empty_tx_q_cnt = 0;
284 adapter->ext_scan = true;
285 adapter->fw_key_api_major_ver = 0;
286 adapter->fw_key_api_minor_ver = 0;
284} 287}
285 288
286/* 289/*
@@ -450,6 +453,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
450 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr); 453 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
451 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 454 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
452 INIT_LIST_HEAD(&priv->sta_list); 455 INIT_LIST_HEAD(&priv->sta_list);
456 skb_queue_head_init(&priv->tdls_txq);
453 457
454 spin_lock_init(&priv->tx_ba_stream_tbl_lock); 458 spin_lock_init(&priv->tx_ba_stream_tbl_lock);
455 spin_lock_init(&priv->rx_reorder_tbl_lock); 459 spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -615,7 +619,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
615 /* cancel current command */ 619 /* cancel current command */
616 if (adapter->curr_cmd) { 620 if (adapter->curr_cmd) {
617 dev_warn(adapter->dev, "curr_cmd is still in processing\n"); 621 dev_warn(adapter->dev, "curr_cmd is still in processing\n");
618 del_timer(&adapter->cmd_timer); 622 del_timer_sync(&adapter->cmd_timer);
619 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); 623 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
620 adapter->curr_cmd = NULL; 624 adapter->curr_cmd = NULL;
621 } 625 }
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 00a95f4c6a6c..ee494db54060 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -60,8 +60,7 @@ enum {
60 BAND_A = 4, 60 BAND_A = 4,
61 BAND_GN = 8, 61 BAND_GN = 8,
62 BAND_AN = 16, 62 BAND_AN = 16,
63 BAND_GAC = 32, 63 BAND_AAC = 32,
64 BAND_AAC = 64,
65}; 64};
66 65
67#define MWIFIEX_WPA_PASSHPHRASE_LEN 64 66#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -86,6 +85,10 @@ struct wep_key {
86#define BAND_CONFIG_A 0x01 85#define BAND_CONFIG_A 0x01
87#define MWIFIEX_SUPPORTED_RATES 14 86#define MWIFIEX_SUPPORTED_RATES 14
88#define MWIFIEX_SUPPORTED_RATES_EXT 32 87#define MWIFIEX_SUPPORTED_RATES_EXT 32
88#define MWIFIEX_TDLS_SUPPORTED_RATES 8
89#define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf
90#define MWIFIEX_PRIO_BK 2
91#define MWIFIEX_PRIO_VI 5
89 92
90struct mwifiex_uap_bss_param { 93struct mwifiex_uap_bss_param {
91 u8 channel; 94 u8 channel;
@@ -174,6 +177,7 @@ struct mwifiex_ds_rx_reorder_tbl {
174struct mwifiex_ds_tx_ba_stream_tbl { 177struct mwifiex_ds_tx_ba_stream_tbl {
175 u16 tid; 178 u16 tid;
176 u8 ra[ETH_ALEN]; 179 u8 ra[ETH_ALEN];
180 u8 amsdu;
177}; 181};
178 182
179#define DBG_CMD_NUM 5 183#define DBG_CMD_NUM 5
@@ -206,7 +210,7 @@ struct mwifiex_debug_info {
206 u32 num_cmd_assoc_success; 210 u32 num_cmd_assoc_success;
207 u32 num_cmd_assoc_failure; 211 u32 num_cmd_assoc_failure;
208 u32 num_tx_timeout; 212 u32 num_tx_timeout;
209 u32 num_cmd_timeout; 213 u8 is_cmd_timedout;
210 u16 timeout_cmd_id; 214 u16 timeout_cmd_id;
211 u16 timeout_cmd_act; 215 u16 timeout_cmd_act;
212 u16 last_cmd_id[DBG_CMD_NUM]; 216 u16 last_cmd_id[DBG_CMD_NUM];
@@ -233,7 +237,10 @@ struct mwifiex_ds_encrypt_key {
233 u8 mac_addr[ETH_ALEN]; 237 u8 mac_addr[ETH_ALEN];
234 u32 is_wapi_key; 238 u32 is_wapi_key;
235 u8 pn[PN_LEN]; /* packet number */ 239 u8 pn[PN_LEN]; /* packet number */
240 u8 pn_len;
236 u8 is_igtk_key; 241 u8 is_igtk_key;
242 u8 is_current_wep_key;
243 u8 is_rx_seq_valid;
237}; 244};
238 245
239struct mwifiex_power_cfg { 246struct mwifiex_power_cfg {
@@ -432,4 +439,16 @@ struct mwifiex_ds_coalesce_cfg {
432 struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES]; 439 struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
433}; 440};
434 441
442struct mwifiex_ds_tdls_oper {
443 u16 tdls_action;
444 u8 peer_mac[ETH_ALEN];
445 u16 capability;
446 u8 qos_info;
447 u8 *ext_capab;
448 u8 ext_capab_len;
449 u8 *supp_rates;
450 u8 supp_rates_len;
451 u8 *ht_capab;
452};
453
435#endif /* !_MWIFIEX_IOCTL_H_ */ 454#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 4e4686e6ac09..89dc62a467f4 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -515,8 +515,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
515 515
516 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && 516 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
517 !bss_desc->disable_11n && !bss_desc->disable_11ac && 517 !bss_desc->disable_11n && !bss_desc->disable_11ac &&
518 (priv->adapter->config_bands & BAND_GAC || 518 priv->adapter->config_bands & BAND_AAC)
519 priv->adapter->config_bands & BAND_AAC))
520 mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos); 519 mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
521 520
522 /* Append vendor specific IE TLV */ 521 /* Append vendor specific IE TLV */
@@ -902,9 +901,9 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
902 mwifiex_get_active_data_rates(priv, adhoc_start->data_rate); 901 mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
903 if ((adapter->adhoc_start_band & BAND_G) && 902 if ((adapter->adhoc_start_band & BAND_G) &&
904 (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) { 903 (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
905 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, 904 if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
906 HostCmd_ACT_GEN_SET, 0, 905 HostCmd_ACT_GEN_SET, 0,
907 &priv->curr_pkt_filter)) { 906 &priv->curr_pkt_filter, false)) {
908 dev_err(adapter->dev, 907 dev_err(adapter->dev,
909 "ADHOC_S_CMD: G Protection config failed\n"); 908 "ADHOC_S_CMD: G Protection config failed\n");
910 return -1; 909 return -1;
@@ -983,7 +982,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
983 cpu_to_le16(sizeof(struct ieee80211_ht_cap)); 982 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
984 radio_type = mwifiex_band_to_radio_type( 983 radio_type = mwifiex_band_to_radio_type(
985 priv->adapter->config_bands); 984 priv->adapter->config_bands);
986 mwifiex_fill_cap_info(priv, radio_type, ht_cap); 985 mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
987 986
988 if (adapter->sec_chan_offset == 987 if (adapter->sec_chan_offset ==
989 IEEE80211_HT_PARAM_CHA_SEC_NONE) { 988 IEEE80211_HT_PARAM_CHA_SEC_NONE) {
@@ -1074,9 +1073,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1074 priv-> 1073 priv->
1075 curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON; 1074 curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
1076 1075
1077 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, 1076 if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1078 HostCmd_ACT_GEN_SET, 0, 1077 HostCmd_ACT_GEN_SET, 0,
1079 &curr_pkt_filter)) { 1078 &curr_pkt_filter, false)) {
1080 dev_err(priv->adapter->dev, 1079 dev_err(priv->adapter->dev,
1081 "ADHOC_J_CMD: G Protection config failed\n"); 1080 "ADHOC_J_CMD: G Protection config failed\n");
1082 return -1; 1081 return -1;
@@ -1300,8 +1299,7 @@ int mwifiex_associate(struct mwifiex_private *priv,
1300 1299
1301 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && 1300 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
1302 !bss_desc->disable_11n && !bss_desc->disable_11ac && 1301 !bss_desc->disable_11n && !bss_desc->disable_11ac &&
1303 (priv->adapter->config_bands & BAND_GAC || 1302 priv->adapter->config_bands & BAND_AAC)
1304 priv->adapter->config_bands & BAND_AAC))
1305 mwifiex_set_11ac_ba_params(priv); 1303 mwifiex_set_11ac_ba_params(priv);
1306 else 1304 else
1307 mwifiex_set_ba_params(priv); 1305 mwifiex_set_ba_params(priv);
@@ -1314,8 +1312,8 @@ int mwifiex_associate(struct mwifiex_private *priv,
1314 retrieval */ 1312 retrieval */
1315 priv->assoc_rsp_size = 0; 1313 priv->assoc_rsp_size = 0;
1316 1314
1317 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_ASSOCIATE, 1315 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_ASSOCIATE,
1318 HostCmd_ACT_GEN_SET, 0, bss_desc); 1316 HostCmd_ACT_GEN_SET, 0, bss_desc, true);
1319} 1317}
1320 1318
1321/* 1319/*
@@ -1335,14 +1333,13 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
1335 priv->curr_bss_params.band); 1333 priv->curr_bss_params.band);
1336 1334
1337 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && 1335 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
1338 (priv->adapter->config_bands & BAND_GAC || 1336 priv->adapter->config_bands & BAND_AAC)
1339 priv->adapter->config_bands & BAND_AAC))
1340 mwifiex_set_11ac_ba_params(priv); 1337 mwifiex_set_11ac_ba_params(priv);
1341 else 1338 else
1342 mwifiex_set_ba_params(priv); 1339 mwifiex_set_ba_params(priv);
1343 1340
1344 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START, 1341 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_START,
1345 HostCmd_ACT_GEN_SET, 0, adhoc_ssid); 1342 HostCmd_ACT_GEN_SET, 0, adhoc_ssid, true);
1346} 1343}
1347 1344
1348/* 1345/*
@@ -1376,8 +1373,7 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
1376 1373
1377 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && 1374 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
1378 !bss_desc->disable_11n && !bss_desc->disable_11ac && 1375 !bss_desc->disable_11n && !bss_desc->disable_11ac &&
1379 (priv->adapter->config_bands & BAND_GAC || 1376 priv->adapter->config_bands & BAND_AAC)
1380 priv->adapter->config_bands & BAND_AAC))
1381 mwifiex_set_11ac_ba_params(priv); 1377 mwifiex_set_11ac_ba_params(priv);
1382 else 1378 else
1383 mwifiex_set_ba_params(priv); 1379 mwifiex_set_ba_params(priv);
@@ -1387,8 +1383,8 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
1387 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n", 1383 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
1388 priv->curr_bss_params.band); 1384 priv->curr_bss_params.band);
1389 1385
1390 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_JOIN, 1386 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
1391 HostCmd_ACT_GEN_SET, 0, bss_desc); 1387 HostCmd_ACT_GEN_SET, 0, bss_desc, true);
1392} 1388}
1393 1389
1394/* 1390/*
@@ -1407,8 +1403,8 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1407 else 1403 else
1408 memcpy(mac_address, mac, ETH_ALEN); 1404 memcpy(mac_address, mac, ETH_ALEN);
1409 1405
1410 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE, 1406 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
1411 HostCmd_ACT_GEN_SET, 0, mac_address); 1407 HostCmd_ACT_GEN_SET, 0, mac_address, true);
1412 1408
1413 return ret; 1409 return ret;
1414} 1410}
@@ -1436,19 +1432,31 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1436 GFP_KERNEL); 1432 GFP_KERNEL);
1437 break; 1433 break;
1438 case NL80211_IFTYPE_ADHOC: 1434 case NL80211_IFTYPE_ADHOC:
1439 return mwifiex_send_cmd_sync(priv, 1435 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
1440 HostCmd_CMD_802_11_AD_HOC_STOP, 1436 HostCmd_ACT_GEN_SET, 0, NULL, true);
1441 HostCmd_ACT_GEN_SET, 0, NULL);
1442 case NL80211_IFTYPE_AP: 1437 case NL80211_IFTYPE_AP:
1443 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1438 return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
1444 HostCmd_ACT_GEN_SET, 0, NULL); 1439 HostCmd_ACT_GEN_SET, 0, NULL, true);
1445 default: 1440 default:
1446 break; 1441 break;
1447 } 1442 }
1448 1443
1449 return ret; 1444 return ret;
1450} 1445}
1451EXPORT_SYMBOL_GPL(mwifiex_deauthenticate); 1446
1447/* This function deauthenticates/disconnects from all BSS. */
1448void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
1449{
1450 struct mwifiex_private *priv;
1451 int i;
1452
1453 for (i = 0; i < adapter->priv_num; i++) {
1454 priv = adapter->priv[i];
1455 if (priv)
1456 mwifiex_deauthenticate(priv, NULL);
1457 }
1458}
1459EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
1452 1460
1453/* 1461/*
1454 * This function converts band to radio type used in channel TLV. 1462 * This function converts band to radio type used in channel TLV.
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d3d2758ec35..77db0886c6e2 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -38,7 +38,8 @@ static void scan_delay_timer_fn(unsigned long data)
38 if (adapter->surprise_removed) 38 if (adapter->surprise_removed)
39 return; 39 return;
40 40
41 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { 41 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT ||
42 !adapter->scan_processing) {
42 /* 43 /*
43 * Abort scan operation by cancelling all pending scan 44 * Abort scan operation by cancelling all pending scan
44 * commands 45 * commands
@@ -194,7 +195,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
194 if (adapter->if_ops.cleanup_if) 195 if (adapter->if_ops.cleanup_if)
195 adapter->if_ops.cleanup_if(adapter); 196 adapter->if_ops.cleanup_if(adapter);
196 197
197 del_timer(&adapter->cmd_timer); 198 del_timer_sync(&adapter->cmd_timer);
198 199
199 /* Free private structures */ 200 /* Free private structures */
200 for (i = 0; i < adapter->priv_num; i++) { 201 for (i = 0; i < adapter->priv_num; i++) {
@@ -678,8 +679,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
678 memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); 679 memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
679 680
680 /* Send request to firmware */ 681 /* Send request to firmware */
681 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_MAC_ADDRESS, 682 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
682 HostCmd_ACT_GEN_SET, 0, NULL); 683 HostCmd_ACT_GEN_SET, 0, NULL, true);
683 684
684 if (!ret) 685 if (!ret)
685 memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN); 686 memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -871,7 +872,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
871 adapter->is_suspended = false; 872 adapter->is_suspended = false;
872 adapter->hs_activated = false; 873 adapter->hs_activated = false;
873 init_waitqueue_head(&adapter->hs_activate_wait_q); 874 init_waitqueue_head(&adapter->hs_activate_wait_q);
874 adapter->cmd_wait_q_required = false;
875 init_waitqueue_head(&adapter->cmd_wait_q.wait); 875 init_waitqueue_head(&adapter->cmd_wait_q.wait);
876 adapter->cmd_wait_q.status = 0; 876 adapter->cmd_wait_q.status = 0;
877 adapter->scan_wait_q_woken = false; 877 adapter->scan_wait_q_woken = false;
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d8ad554ce39f..d53e1e8c9467 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -59,7 +59,7 @@ enum {
59 59
60#define MWIFIEX_UPLD_SIZE (2312) 60#define MWIFIEX_UPLD_SIZE (2312)
61 61
62#define MAX_EVENT_SIZE 1024 62#define MAX_EVENT_SIZE 2048
63 63
64#define ARP_FILTER_MAX_BUF_SIZE 68 64#define ARP_FILTER_MAX_BUF_SIZE 68
65 65
@@ -116,7 +116,7 @@ enum {
116#define MWIFIEX_TYPE_DATA 0 116#define MWIFIEX_TYPE_DATA 0
117#define MWIFIEX_TYPE_EVENT 3 117#define MWIFIEX_TYPE_EVENT 3
118 118
119#define MAX_BITMAP_RATES_SIZE 10 119#define MAX_BITMAP_RATES_SIZE 18
120 120
121#define MAX_CHANNEL_BAND_BG 14 121#define MAX_CHANNEL_BAND_BG 14
122#define MAX_CHANNEL_BAND_A 165 122#define MAX_CHANNEL_BAND_A 165
@@ -145,7 +145,6 @@ struct mwifiex_dbg {
145 u32 num_cmd_assoc_success; 145 u32 num_cmd_assoc_success;
146 u32 num_cmd_assoc_failure; 146 u32 num_cmd_assoc_failure;
147 u32 num_tx_timeout; 147 u32 num_tx_timeout;
148 u32 num_cmd_timeout;
149 u16 timeout_cmd_id; 148 u16 timeout_cmd_id;
150 u16 timeout_cmd_act; 149 u16 timeout_cmd_act;
151 u16 last_cmd_id[DBG_CMD_NUM]; 150 u16 last_cmd_id[DBG_CMD_NUM];
@@ -193,6 +192,8 @@ struct mwifiex_add_ba_param {
193 u32 tx_win_size; 192 u32 tx_win_size;
194 u32 rx_win_size; 193 u32 rx_win_size;
195 u32 timeout; 194 u32 timeout;
195 u8 tx_amsdu;
196 u8 rx_amsdu;
196}; 197};
197 198
198struct mwifiex_tx_aggr { 199struct mwifiex_tx_aggr {
@@ -210,6 +211,7 @@ struct mwifiex_ra_list_tbl {
210 u16 ba_pkt_count; 211 u16 ba_pkt_count;
211 u8 ba_packet_thr; 212 u8 ba_packet_thr;
212 u16 total_pkt_count; 213 u16 total_pkt_count;
214 bool tdls_link;
213}; 215};
214 216
215struct mwifiex_tid_tbl { 217struct mwifiex_tid_tbl {
@@ -262,6 +264,31 @@ struct ieee_types_generic {
262 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)]; 264 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
263} __packed; 265} __packed;
264 266
267struct ieee_types_bss_co_2040 {
268 struct ieee_types_header ieee_hdr;
269 u8 bss_2040co;
270} __packed;
271
272struct ieee_types_extcap {
273 struct ieee_types_header ieee_hdr;
274 u8 ext_capab[8];
275} __packed;
276
277struct ieee_types_vht_cap {
278 struct ieee_types_header ieee_hdr;
279 struct ieee80211_vht_cap vhtcap;
280} __packed;
281
282struct ieee_types_vht_oper {
283 struct ieee_types_header ieee_hdr;
284 struct ieee80211_vht_operation vhtoper;
285} __packed;
286
287struct ieee_types_aid {
288 struct ieee_types_header ieee_hdr;
289 u16 aid;
290} __packed;
291
265struct mwifiex_bssdescriptor { 292struct mwifiex_bssdescriptor {
266 u8 mac_address[ETH_ALEN]; 293 u8 mac_address[ETH_ALEN];
267 struct cfg80211_ssid ssid; 294 struct cfg80211_ssid ssid;
@@ -443,6 +470,7 @@ struct mwifiex_private {
443 u8 wpa_ie_len; 470 u8 wpa_ie_len;
444 u8 wpa_is_gtk_set; 471 u8 wpa_is_gtk_set;
445 struct host_cmd_ds_802_11_key_material aes_key; 472 struct host_cmd_ds_802_11_key_material aes_key;
473 struct host_cmd_ds_802_11_key_material_v2 aes_key_v2;
446 u8 wapi_ie[256]; 474 u8 wapi_ie[256];
447 u8 wapi_ie_len; 475 u8 wapi_ie_len;
448 u8 *wps_ie; 476 u8 *wps_ie;
@@ -461,6 +489,7 @@ struct mwifiex_private {
461 struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID]; 489 struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
462 struct mwifiex_add_ba_param add_ba_param; 490 struct mwifiex_add_ba_param add_ba_param;
463 u16 rx_seq[MAX_NUM_TID]; 491 u16 rx_seq[MAX_NUM_TID];
492 u8 tos_to_tid_inv[MAX_NUM_TID];
464 struct list_head rx_reorder_tbl_ptr; 493 struct list_head rx_reorder_tbl_ptr;
465 /* spin lock for rx_reorder_tbl_ptr queue */ 494 /* spin lock for rx_reorder_tbl_ptr queue */
466 spinlock_t rx_reorder_tbl_lock; 495 spinlock_t rx_reorder_tbl_lock;
@@ -518,6 +547,8 @@ struct mwifiex_private {
518 unsigned long csa_expire_time; 547 unsigned long csa_expire_time;
519 u8 del_list_idx; 548 u8 del_list_idx;
520 bool hs2_enabled; 549 bool hs2_enabled;
550 struct station_parameters *sta_params;
551 struct sk_buff_head tdls_txq;
521}; 552};
522 553
523enum mwifiex_ba_status { 554enum mwifiex_ba_status {
@@ -531,6 +562,7 @@ struct mwifiex_tx_ba_stream_tbl {
531 int tid; 562 int tid;
532 u8 ra[ETH_ALEN]; 563 u8 ra[ETH_ALEN];
533 enum mwifiex_ba_status ba_status; 564 enum mwifiex_ba_status ba_status;
565 u8 amsdu;
534}; 566};
535 567
536struct mwifiex_rx_reorder_tbl; 568struct mwifiex_rx_reorder_tbl;
@@ -545,10 +577,12 @@ struct mwifiex_rx_reorder_tbl {
545 struct list_head list; 577 struct list_head list;
546 int tid; 578 int tid;
547 u8 ta[ETH_ALEN]; 579 u8 ta[ETH_ALEN];
580 int init_win;
548 int start_win; 581 int start_win;
549 int win_size; 582 int win_size;
550 void **rx_reorder_ptr; 583 void **rx_reorder_ptr;
551 struct reorder_tmr_cnxt timer_context; 584 struct reorder_tmr_cnxt timer_context;
585 u8 amsdu;
552 u8 flags; 586 u8 flags;
553}; 587};
554 588
@@ -583,17 +617,35 @@ struct mwifiex_bss_priv {
583 u64 fw_tsf; 617 u64 fw_tsf;
584}; 618};
585 619
586/* This is AP specific structure which stores information 620struct mwifiex_tdls_capab {
587 * about associated STA 621 __le16 capab;
622 u8 rates[32];
623 u8 rates_len;
624 u8 qos_info;
625 u8 coex_2040;
626 u16 aid;
627 struct ieee80211_ht_cap ht_capb;
628 struct ieee80211_ht_operation ht_oper;
629 struct ieee_types_extcap extcap;
630 struct ieee_types_generic rsn_ie;
631 struct ieee80211_vht_cap vhtcap;
632 struct ieee80211_vht_operation vhtoper;
633};
634
635/* This is AP/TDLS specific structure which stores information
636 * about associated/peer STA
588 */ 637 */
589struct mwifiex_sta_node { 638struct mwifiex_sta_node {
590 struct list_head list; 639 struct list_head list;
591 u8 mac_addr[ETH_ALEN]; 640 u8 mac_addr[ETH_ALEN];
592 u8 is_wmm_enabled; 641 u8 is_wmm_enabled;
593 u8 is_11n_enabled; 642 u8 is_11n_enabled;
643 u8 is_11ac_enabled;
594 u8 ampdu_sta[MAX_NUM_TID]; 644 u8 ampdu_sta[MAX_NUM_TID];
595 u16 rx_seq[MAX_NUM_TID]; 645 u16 rx_seq[MAX_NUM_TID];
596 u16 max_amsdu; 646 u16 max_amsdu;
647 u8 tdls_status;
648 struct mwifiex_tdls_capab tdls_cap;
597}; 649};
598 650
599struct mwifiex_if_ops { 651struct mwifiex_if_ops {
@@ -671,7 +723,7 @@ struct mwifiex_adapter {
671 struct cmd_ctrl_node *curr_cmd; 723 struct cmd_ctrl_node *curr_cmd;
672 /* spin lock for command */ 724 /* spin lock for command */
673 spinlock_t mwifiex_cmd_lock; 725 spinlock_t mwifiex_cmd_lock;
674 u32 num_cmd_timeout; 726 u8 is_cmd_timedout;
675 u16 last_init_cmd; 727 u16 last_init_cmd;
676 struct timer_list cmd_timer; 728 struct timer_list cmd_timer;
677 struct list_head cmd_free_q; 729 struct list_head cmd_free_q;
@@ -722,15 +774,16 @@ struct mwifiex_adapter {
722 u16 hs_activate_wait_q_woken; 774 u16 hs_activate_wait_q_woken;
723 wait_queue_head_t hs_activate_wait_q; 775 wait_queue_head_t hs_activate_wait_q;
724 bool is_suspended; 776 bool is_suspended;
777 bool hs_enabling;
725 u8 event_body[MAX_EVENT_SIZE]; 778 u8 event_body[MAX_EVENT_SIZE];
726 u32 hw_dot_11n_dev_cap; 779 u32 hw_dot_11n_dev_cap;
727 u8 hw_dev_mcs_support; 780 u8 hw_dev_mcs_support;
781 u8 user_dev_mcs_support;
728 u8 adhoc_11n_enabled; 782 u8 adhoc_11n_enabled;
729 u8 sec_chan_offset; 783 u8 sec_chan_offset;
730 struct mwifiex_dbg dbg; 784 struct mwifiex_dbg dbg;
731 u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE]; 785 u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
732 u32 arp_filter_size; 786 u32 arp_filter_size;
733 u16 cmd_wait_q_required;
734 struct mwifiex_wait_queue cmd_wait_q; 787 struct mwifiex_wait_queue cmd_wait_q;
735 u8 scan_wait_q_woken; 788 u8 scan_wait_q_woken;
736 spinlock_t queue_lock; /* lock for tx queues */ 789 spinlock_t queue_lock; /* lock for tx queues */
@@ -753,6 +806,9 @@ struct mwifiex_adapter {
753 atomic_t is_tx_received; 806 atomic_t is_tx_received;
754 atomic_t pending_bridged_pkts; 807 atomic_t pending_bridged_pkts;
755 struct semaphore *card_sem; 808 struct semaphore *card_sem;
809 bool ext_scan;
810 u8 fw_api_ver;
811 u8 fw_key_api_major_ver, fw_key_api_minor_ver;
756}; 812};
757 813
758int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 814int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -788,11 +844,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter);
788int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, 844int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
789 struct cmd_ctrl_node *cmd_node); 845 struct cmd_ctrl_node *cmd_node);
790 846
791int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, 847int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
792 u16 cmd_action, u32 cmd_oid, void *data_buf); 848 u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync);
793
794int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
795 u16 cmd_action, u32 cmd_oid, void *data_buf);
796 849
797void mwifiex_cmd_timeout_func(unsigned long function_context); 850void mwifiex_cmd_timeout_func(unsigned long function_context);
798 851
@@ -880,6 +933,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
880void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason); 933void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
881u8 mwifiex_band_to_radio_type(u8 band); 934u8 mwifiex_band_to_radio_type(u8 band);
882int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); 935int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
936void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
883int mwifiex_adhoc_start(struct mwifiex_private *priv, 937int mwifiex_adhoc_start(struct mwifiex_private *priv,
884 struct cfg80211_ssid *adhoc_ssid); 938 struct cfg80211_ssid *adhoc_ssid);
885int mwifiex_adhoc_join(struct mwifiex_private *priv, 939int mwifiex_adhoc_join(struct mwifiex_private *priv,
@@ -938,6 +992,12 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
938 struct cfg80211_ap_settings *params); 992 struct cfg80211_ap_settings *params);
939void mwifiex_set_ba_params(struct mwifiex_private *priv); 993void mwifiex_set_ba_params(struct mwifiex_private *priv);
940void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv); 994void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
995int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
996 struct host_cmd_ds_command *cmd,
997 void *data_buf);
998int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
999int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
1000 void *buf);
941 1001
942/* 1002/*
943 * This function checks if the queuing is RA based or not. 1003 * This function checks if the queuing is RA based or not.
@@ -1078,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
1078 const u8 *key, int key_len, u8 key_index, 1138 const u8 *key, int key_len, u8 key_index,
1079 const u8 *mac_addr, int disable); 1139 const u8 *mac_addr, int disable);
1080 1140
1081int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len); 1141int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
1082 1142
1083int mwifiex_get_ver_ext(struct mwifiex_private *priv); 1143int mwifiex_get_ver_ext(struct mwifiex_private *priv);
1084 1144
@@ -1159,6 +1219,32 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
1159 1219
1160extern const struct ethtool_ops mwifiex_ethtool_ops; 1220extern const struct ethtool_ops mwifiex_ethtool_ops;
1161 1221
1222void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
1223void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
1224void
1225mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
1226 int ies_len, struct mwifiex_sta_node *node);
1227struct mwifiex_sta_node *
1228mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
1229struct mwifiex_sta_node *
1230mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
1231int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
1232 u8 action_code, u8 dialog_token,
1233 u16 status_code, const u8 *extra_ies,
1234 size_t extra_ies_len);
1235int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
1236 u8 *peer, u8 action_code, u8 dialog_token,
1237 u16 status_code, const u8 *extra_ies,
1238 size_t extra_ies_len);
1239void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1240 u8 *buf, int len);
1241int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
1242int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
1243void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
1244bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
1245u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
1246 u32 pri_chan, u8 chan_bw);
1247
1162#ifdef CONFIG_DEBUG_FS 1248#ifdef CONFIG_DEBUG_FS
1163void mwifiex_debugfs_init(void); 1249void mwifiex_debugfs_init(void);
1164void mwifiex_debugfs_remove(void); 1250void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 7fe7b53fb17a..a7e8b96b2d90 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -39,20 +39,31 @@ static struct semaphore add_remove_card_sem;
39 39
40static int 40static int
41mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, 41mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
42 int size, int flags) 42 size_t size, int flags)
43{ 43{
44 struct pcie_service_card *card = adapter->card; 44 struct pcie_service_card *card = adapter->card;
45 dma_addr_t buf_pa; 45 struct mwifiex_dma_mapping mapping;
46 46
47 buf_pa = pci_map_single(card->dev, skb->data, size, flags); 47 mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
48 if (pci_dma_mapping_error(card->dev, buf_pa)) { 48 if (pci_dma_mapping_error(card->dev, mapping.addr)) {
49 dev_err(adapter->dev, "failed to map pci memory!\n"); 49 dev_err(adapter->dev, "failed to map pci memory!\n");
50 return -1; 50 return -1;
51 } 51 }
52 memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t)); 52 mapping.len = size;
53 memcpy(skb->cb, &mapping, sizeof(mapping));
53 return 0; 54 return 0;
54} 55}
55 56
57static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
58 struct sk_buff *skb, int flags)
59{
60 struct pcie_service_card *card = adapter->card;
61 struct mwifiex_dma_mapping mapping;
62
63 MWIFIEX_SKB_PACB(skb, &mapping);
64 pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
65}
66
56/* 67/*
57 * This function reads sleep cookie and checks if FW is ready 68 * This function reads sleep cookie and checks if FW is ready
58 */ 69 */
@@ -109,6 +120,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
109 120
110 /* Indicate device suspended */ 121 /* Indicate device suspended */
111 adapter->is_suspended = true; 122 adapter->is_suspended = true;
123 adapter->hs_enabling = false;
112 124
113 return 0; 125 return 0;
114} 126}
@@ -179,6 +191,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
179 card->pcie.firmware = data->firmware; 191 card->pcie.firmware = data->firmware;
180 card->pcie.reg = data->reg; 192 card->pcie.reg = data->reg;
181 card->pcie.blksz_fw_dl = data->blksz_fw_dl; 193 card->pcie.blksz_fw_dl = data->blksz_fw_dl;
194 card->pcie.tx_buf_size = data->tx_buf_size;
182 } 195 }
183 196
184 if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops, 197 if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -199,7 +212,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
199 struct pcie_service_card *card; 212 struct pcie_service_card *card;
200 struct mwifiex_adapter *adapter; 213 struct mwifiex_adapter *adapter;
201 struct mwifiex_private *priv; 214 struct mwifiex_private *priv;
202 int i;
203 215
204 card = pci_get_drvdata(pdev); 216 card = pci_get_drvdata(pdev);
205 if (!card) 217 if (!card)
@@ -218,11 +230,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
218 mwifiex_pcie_resume(&pdev->dev); 230 mwifiex_pcie_resume(&pdev->dev);
219#endif 231#endif
220 232
221 for (i = 0; i < adapter->priv_num; i++) 233 mwifiex_deauthenticate_all(adapter);
222 if ((GET_BSS_ROLE(adapter->priv[i]) ==
223 MWIFIEX_BSS_ROLE_STA) &&
224 adapter->priv[i]->media_connected)
225 mwifiex_deauthenticate(adapter->priv[i], NULL);
226 234
227 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 235 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
228 236
@@ -320,6 +328,30 @@ static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
320 return; 328 return;
321} 329}
322 330
331static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
332 u32 max_delay_loop_cnt)
333{
334 struct pcie_service_card *card = adapter->card;
335 u8 *buffer;
336 u32 sleep_cookie, count;
337
338 for (count = 0; count < max_delay_loop_cnt; count++) {
339 buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN;
340 sleep_cookie = *(u32 *)buffer;
341
342 if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
343 dev_dbg(adapter->dev,
344 "sleep cookie found at count %d\n", count);
345 break;
346 }
347 usleep_range(20, 30);
348 }
349
350 if (count >= max_delay_loop_cnt)
351 dev_dbg(adapter->dev,
352 "max count reached while accessing sleep cookie\n");
353}
354
323/* This function wakes up the card by reading fw_status register. */ 355/* This function wakes up the card by reading fw_status register. */
324static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) 356static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
325{ 357{
@@ -456,7 +488,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
456 PCI_DMA_FROMDEVICE)) 488 PCI_DMA_FROMDEVICE))
457 return -1; 489 return -1;
458 490
459 MWIFIEX_SKB_PACB(skb, &buf_pa); 491 buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
460 492
461 dev_dbg(adapter->dev, 493 dev_dbg(adapter->dev,
462 "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", 494 "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -513,7 +545,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
513 PCI_DMA_FROMDEVICE)) 545 PCI_DMA_FROMDEVICE))
514 return -1; 546 return -1;
515 547
516 MWIFIEX_SKB_PACB(skb, &buf_pa); 548 buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
517 549
518 dev_dbg(adapter->dev, 550 dev_dbg(adapter->dev,
519 "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", 551 "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -549,8 +581,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
549 desc2 = card->txbd_ring[i]; 581 desc2 = card->txbd_ring[i];
550 if (card->tx_buf_list[i]) { 582 if (card->tx_buf_list[i]) {
551 skb = card->tx_buf_list[i]; 583 skb = card->tx_buf_list[i];
552 pci_unmap_single(card->dev, desc2->paddr, 584 mwifiex_unmap_pci_memory(adapter, skb,
553 skb->len, PCI_DMA_TODEVICE); 585 PCI_DMA_TODEVICE);
554 dev_kfree_skb_any(skb); 586 dev_kfree_skb_any(skb);
555 } 587 }
556 memset(desc2, 0, sizeof(*desc2)); 588 memset(desc2, 0, sizeof(*desc2));
@@ -558,8 +590,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
558 desc = card->txbd_ring[i]; 590 desc = card->txbd_ring[i];
559 if (card->tx_buf_list[i]) { 591 if (card->tx_buf_list[i]) {
560 skb = card->tx_buf_list[i]; 592 skb = card->tx_buf_list[i];
561 pci_unmap_single(card->dev, desc->paddr, 593 mwifiex_unmap_pci_memory(adapter, skb,
562 skb->len, PCI_DMA_TODEVICE); 594 PCI_DMA_TODEVICE);
563 dev_kfree_skb_any(skb); 595 dev_kfree_skb_any(skb);
564 } 596 }
565 memset(desc, 0, sizeof(*desc)); 597 memset(desc, 0, sizeof(*desc));
@@ -587,8 +619,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
587 desc2 = card->rxbd_ring[i]; 619 desc2 = card->rxbd_ring[i];
588 if (card->rx_buf_list[i]) { 620 if (card->rx_buf_list[i]) {
589 skb = card->rx_buf_list[i]; 621 skb = card->rx_buf_list[i];
590 pci_unmap_single(card->dev, desc2->paddr, 622 mwifiex_unmap_pci_memory(adapter, skb,
591 skb->len, PCI_DMA_FROMDEVICE); 623 PCI_DMA_FROMDEVICE);
592 dev_kfree_skb_any(skb); 624 dev_kfree_skb_any(skb);
593 } 625 }
594 memset(desc2, 0, sizeof(*desc2)); 626 memset(desc2, 0, sizeof(*desc2));
@@ -596,8 +628,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
596 desc = card->rxbd_ring[i]; 628 desc = card->rxbd_ring[i];
597 if (card->rx_buf_list[i]) { 629 if (card->rx_buf_list[i]) {
598 skb = card->rx_buf_list[i]; 630 skb = card->rx_buf_list[i];
599 pci_unmap_single(card->dev, desc->paddr, 631 mwifiex_unmap_pci_memory(adapter, skb,
600 skb->len, PCI_DMA_FROMDEVICE); 632 PCI_DMA_FROMDEVICE);
601 dev_kfree_skb_any(skb); 633 dev_kfree_skb_any(skb);
602 } 634 }
603 memset(desc, 0, sizeof(*desc)); 635 memset(desc, 0, sizeof(*desc));
@@ -622,8 +654,8 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
622 desc = card->evtbd_ring[i]; 654 desc = card->evtbd_ring[i];
623 if (card->evt_buf_list[i]) { 655 if (card->evt_buf_list[i]) {
624 skb = card->evt_buf_list[i]; 656 skb = card->evt_buf_list[i];
625 pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE, 657 mwifiex_unmap_pci_memory(adapter, skb,
626 PCI_DMA_FROMDEVICE); 658 PCI_DMA_FROMDEVICE);
627 dev_kfree_skb_any(skb); 659 dev_kfree_skb_any(skb);
628 } 660 }
629 card->evt_buf_list[i] = NULL; 661 card->evt_buf_list[i] = NULL;
@@ -861,7 +893,6 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
861static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) 893static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
862{ 894{
863 struct pcie_service_card *card; 895 struct pcie_service_card *card;
864 dma_addr_t buf_pa;
865 896
866 if (!adapter) 897 if (!adapter)
867 return 0; 898 return 0;
@@ -869,16 +900,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
869 card = adapter->card; 900 card = adapter->card;
870 901
871 if (card && card->cmdrsp_buf) { 902 if (card && card->cmdrsp_buf) {
872 MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa); 903 mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
873 pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, 904 PCI_DMA_FROMDEVICE);
874 PCI_DMA_FROMDEVICE);
875 dev_kfree_skb_any(card->cmdrsp_buf); 905 dev_kfree_skb_any(card->cmdrsp_buf);
876 } 906 }
877 907
878 if (card && card->cmd_buf) { 908 if (card && card->cmd_buf) {
879 MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa); 909 mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
880 pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len, 910 PCI_DMA_TODEVICE);
881 PCI_DMA_TODEVICE);
882 } 911 }
883 return 0; 912 return 0;
884} 913}
@@ -956,7 +985,6 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
956static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) 985static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
957{ 986{
958 struct sk_buff *skb; 987 struct sk_buff *skb;
959 dma_addr_t buf_pa;
960 u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0; 988 u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
961 struct mwifiex_pcie_buf_desc *desc; 989 struct mwifiex_pcie_buf_desc *desc;
962 struct mwifiex_pfu_buf_desc *desc2; 990 struct mwifiex_pfu_buf_desc *desc2;
@@ -986,13 +1014,13 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
986 reg->tx_start_ptr; 1014 reg->tx_start_ptr;
987 1015
988 skb = card->tx_buf_list[wrdoneidx]; 1016 skb = card->tx_buf_list[wrdoneidx];
1017
989 if (skb) { 1018 if (skb) {
990 dev_dbg(adapter->dev, 1019 dev_dbg(adapter->dev,
991 "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", 1020 "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
992 skb, wrdoneidx); 1021 skb, wrdoneidx);
993 MWIFIEX_SKB_PACB(skb, &buf_pa); 1022 mwifiex_unmap_pci_memory(adapter, skb,
994 pci_unmap_single(card->dev, buf_pa, skb->len, 1023 PCI_DMA_TODEVICE);
995 PCI_DMA_TODEVICE);
996 1024
997 unmap_count++; 1025 unmap_count++;
998 1026
@@ -1006,7 +1034,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
1006 card->tx_buf_list[wrdoneidx] = NULL; 1034 card->tx_buf_list[wrdoneidx] = NULL;
1007 1035
1008 if (reg->pfu_enabled) { 1036 if (reg->pfu_enabled) {
1009 desc2 = (void *)card->txbd_ring[wrdoneidx]; 1037 desc2 = card->txbd_ring[wrdoneidx];
1010 memset(desc2, 0, sizeof(*desc2)); 1038 memset(desc2, 0, sizeof(*desc2));
1011 } else { 1039 } else {
1012 desc = card->txbd_ring[wrdoneidx]; 1040 desc = card->txbd_ring[wrdoneidx];
@@ -1082,16 +1110,16 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
1082 tmp = (__le16 *)&payload[2]; 1110 tmp = (__le16 *)&payload[2];
1083 *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA); 1111 *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
1084 1112
1085 if (mwifiex_map_pci_memory(adapter, skb, skb->len , 1113 if (mwifiex_map_pci_memory(adapter, skb, skb->len,
1086 PCI_DMA_TODEVICE)) 1114 PCI_DMA_TODEVICE))
1087 return -1; 1115 return -1;
1088 1116
1089 wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr; 1117 wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
1090 MWIFIEX_SKB_PACB(skb, &buf_pa); 1118 buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
1091 card->tx_buf_list[wrindx] = skb; 1119 card->tx_buf_list[wrindx] = skb;
1092 1120
1093 if (reg->pfu_enabled) { 1121 if (reg->pfu_enabled) {
1094 desc2 = (void *)card->txbd_ring[wrindx]; 1122 desc2 = card->txbd_ring[wrindx];
1095 desc2->paddr = buf_pa; 1123 desc2->paddr = buf_pa;
1096 desc2->len = (u16)skb->len; 1124 desc2->len = (u16)skb->len;
1097 desc2->frag_len = (u16)skb->len; 1125 desc2->frag_len = (u16)skb->len;
@@ -1162,8 +1190,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
1162 1190
1163 return -EINPROGRESS; 1191 return -EINPROGRESS;
1164done_unmap: 1192done_unmap:
1165 MWIFIEX_SKB_PACB(skb, &buf_pa); 1193 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
1166 pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
1167 card->tx_buf_list[wrindx] = NULL; 1194 card->tx_buf_list[wrindx] = NULL;
1168 if (reg->pfu_enabled) 1195 if (reg->pfu_enabled)
1169 memset(desc2, 0, sizeof(*desc2)); 1196 memset(desc2, 0, sizeof(*desc2));
@@ -1217,9 +1244,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
1217 if (!skb_data) 1244 if (!skb_data)
1218 return -ENOMEM; 1245 return -ENOMEM;
1219 1246
1220 MWIFIEX_SKB_PACB(skb_data, &buf_pa); 1247 mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
1221 pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
1222 PCI_DMA_FROMDEVICE);
1223 card->rx_buf_list[rd_index] = NULL; 1248 card->rx_buf_list[rd_index] = NULL;
1224 1249
1225 /* Get data length from interface header - 1250 /* Get data length from interface header -
@@ -1246,7 +1271,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
1246 PCI_DMA_FROMDEVICE)) 1271 PCI_DMA_FROMDEVICE))
1247 return -1; 1272 return -1;
1248 1273
1249 MWIFIEX_SKB_PACB(skb_tmp, &buf_pa); 1274 buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
1250 1275
1251 dev_dbg(adapter->dev, 1276 dev_dbg(adapter->dev,
1252 "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", 1277 "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
@@ -1254,7 +1279,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
1254 card->rx_buf_list[rd_index] = skb_tmp; 1279 card->rx_buf_list[rd_index] = skb_tmp;
1255 1280
1256 if (reg->pfu_enabled) { 1281 if (reg->pfu_enabled) {
1257 desc2 = (void *)card->rxbd_ring[rd_index]; 1282 desc2 = card->rxbd_ring[rd_index];
1258 desc2->paddr = buf_pa; 1283 desc2->paddr = buf_pa;
1259 desc2->len = skb_tmp->len; 1284 desc2->len = skb_tmp->len;
1260 desc2->frag_len = skb_tmp->len; 1285 desc2->frag_len = skb_tmp->len;
@@ -1322,7 +1347,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1322 if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE)) 1347 if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
1323 return -1; 1348 return -1;
1324 1349
1325 MWIFIEX_SKB_PACB(skb, &buf_pa); 1350 buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
1326 1351
1327 /* Write the lower 32bits of the physical address to low command 1352 /* Write the lower 32bits of the physical address to low command
1328 * address scratch register 1353 * address scratch register
@@ -1331,8 +1356,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1331 dev_err(adapter->dev, 1356 dev_err(adapter->dev,
1332 "%s: failed to write download command to boot code.\n", 1357 "%s: failed to write download command to boot code.\n",
1333 __func__); 1358 __func__);
1334 pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, 1359 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
1335 PCI_DMA_TODEVICE);
1336 return -1; 1360 return -1;
1337 } 1361 }
1338 1362
@@ -1344,8 +1368,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1344 dev_err(adapter->dev, 1368 dev_err(adapter->dev,
1345 "%s: failed to write download command to boot code.\n", 1369 "%s: failed to write download command to boot code.\n",
1346 __func__); 1370 __func__);
1347 pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, 1371 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
1348 PCI_DMA_TODEVICE);
1349 return -1; 1372 return -1;
1350 } 1373 }
1351 1374
@@ -1354,8 +1377,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1354 dev_err(adapter->dev, 1377 dev_err(adapter->dev,
1355 "%s: failed to write command len to cmd_size scratch reg\n", 1378 "%s: failed to write command len to cmd_size scratch reg\n",
1356 __func__); 1379 __func__);
1357 pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, 1380 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
1358 PCI_DMA_TODEVICE);
1359 return -1; 1381 return -1;
1360 } 1382 }
1361 1383
@@ -1364,8 +1386,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1364 CPU_INTR_DOOR_BELL)) { 1386 CPU_INTR_DOOR_BELL)) {
1365 dev_err(adapter->dev, 1387 dev_err(adapter->dev,
1366 "%s: failed to assert door-bell intr\n", __func__); 1388 "%s: failed to assert door-bell intr\n", __func__);
1367 pci_unmap_single(card->dev, buf_pa, 1389 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
1368 MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
1369 return -1; 1390 return -1;
1370 } 1391 }
1371 1392
@@ -1439,7 +1460,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1439 */ 1460 */
1440 1461
1441 if (card->cmdrsp_buf) { 1462 if (card->cmdrsp_buf) {
1442 MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa); 1463 cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
1443 /* Write the lower 32bits of the cmdrsp buffer physical 1464 /* Write the lower 32bits of the cmdrsp buffer physical
1444 address */ 1465 address */
1445 if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 1466 if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
@@ -1460,7 +1481,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
1460 } 1481 }
1461 } 1482 }
1462 1483
1463 MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa); 1484 cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
1464 /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ 1485 /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
1465 if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, 1486 if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
1466 (u32)cmd_buf_pa)) { 1487 (u32)cmd_buf_pa)) {
@@ -1514,13 +1535,17 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
1514 int count = 0; 1535 int count = 0;
1515 u16 rx_len; 1536 u16 rx_len;
1516 __le16 pkt_len; 1537 __le16 pkt_len;
1517 dma_addr_t buf_pa;
1518 1538
1519 dev_dbg(adapter->dev, "info: Rx CMD Response\n"); 1539 dev_dbg(adapter->dev, "info: Rx CMD Response\n");
1520 1540
1521 MWIFIEX_SKB_PACB(skb, &buf_pa); 1541 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
1522 pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, 1542
1523 PCI_DMA_FROMDEVICE); 1543 /* Unmap the command as a response has been received. */
1544 if (card->cmd_buf) {
1545 mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
1546 PCI_DMA_TODEVICE);
1547 card->cmd_buf = NULL;
1548 }
1524 1549
1525 pkt_len = *((__le16 *)skb->data); 1550 pkt_len = *((__le16 *)skb->data);
1526 rx_len = le16_to_cpu(pkt_len); 1551 rx_len = le16_to_cpu(pkt_len);
@@ -1539,6 +1564,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
1539 "Write register failed\n"); 1564 "Write register failed\n");
1540 return -1; 1565 return -1;
1541 } 1566 }
1567 mwifiex_delay_for_sleep_cookie(adapter,
1568 MWIFIEX_MAX_DELAY_COUNT);
1542 while (reg->sleep_cookie && (count++ < 10) && 1569 while (reg->sleep_cookie && (count++ < 10) &&
1543 mwifiex_pcie_ok_to_access_hw(adapter)) 1570 mwifiex_pcie_ok_to_access_hw(adapter))
1544 usleep_range(50, 60); 1571 usleep_range(50, 60);
@@ -1552,8 +1579,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
1552 if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, 1579 if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
1553 PCI_DMA_FROMDEVICE)) 1580 PCI_DMA_FROMDEVICE))
1554 return -1; 1581 return -1;
1555
1556 MWIFIEX_SKB_PACB(skb, &buf_pa);
1557 } else if (mwifiex_pcie_ok_to_access_hw(adapter)) { 1582 } else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
1558 adapter->curr_cmd->resp_skb = skb; 1583 adapter->curr_cmd->resp_skb = skb;
1559 adapter->cmd_resp_received = true; 1584 adapter->cmd_resp_received = true;
@@ -1588,8 +1613,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
1588 struct sk_buff *skb) 1613 struct sk_buff *skb)
1589{ 1614{
1590 struct pcie_service_card *card = adapter->card; 1615 struct pcie_service_card *card = adapter->card;
1591 dma_addr_t buf_pa;
1592 struct sk_buff *skb_tmp;
1593 1616
1594 if (skb) { 1617 if (skb) {
1595 card->cmdrsp_buf = skb; 1618 card->cmdrsp_buf = skb;
@@ -1599,14 +1622,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
1599 return -1; 1622 return -1;
1600 } 1623 }
1601 1624
1602 skb_tmp = card->cmd_buf;
1603 if (skb_tmp) {
1604 MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
1605 pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
1606 PCI_DMA_FROMDEVICE);
1607 card->cmd_buf = NULL;
1608 }
1609
1610 return 0; 1625 return 0;
1611} 1626}
1612 1627
@@ -1619,7 +1634,6 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
1619 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; 1634 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
1620 u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; 1635 u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
1621 u32 wrptr, event; 1636 u32 wrptr, event;
1622 dma_addr_t buf_pa;
1623 struct mwifiex_evt_buf_desc *desc; 1637 struct mwifiex_evt_buf_desc *desc;
1624 1638
1625 if (!mwifiex_pcie_ok_to_access_hw(adapter)) 1639 if (!mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1655,9 +1669,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
1655 1669
1656 dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr); 1670 dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
1657 skb_cmd = card->evt_buf_list[rdptr]; 1671 skb_cmd = card->evt_buf_list[rdptr];
1658 MWIFIEX_SKB_PACB(skb_cmd, &buf_pa); 1672 mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
1659 pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
1660 PCI_DMA_FROMDEVICE);
1661 1673
1662 /* Take the pointer and set it to event pointer in adapter 1674 /* Take the pointer and set it to event pointer in adapter
1663 and will return back after event handling callback */ 1675 and will return back after event handling callback */
@@ -1703,7 +1715,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
1703 int ret = 0; 1715 int ret = 0;
1704 u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; 1716 u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
1705 u32 wrptr; 1717 u32 wrptr;
1706 dma_addr_t buf_pa;
1707 struct mwifiex_evt_buf_desc *desc; 1718 struct mwifiex_evt_buf_desc *desc;
1708 1719
1709 if (!skb) 1720 if (!skb)
@@ -1728,11 +1739,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
1728 MAX_EVENT_SIZE, 1739 MAX_EVENT_SIZE,
1729 PCI_DMA_FROMDEVICE)) 1740 PCI_DMA_FROMDEVICE))
1730 return -1; 1741 return -1;
1731 MWIFIEX_SKB_PACB(skb, &buf_pa);
1732 card->evt_buf_list[rdptr] = skb; 1742 card->evt_buf_list[rdptr] = skb;
1733 MWIFIEX_SKB_PACB(skb, &buf_pa);
1734 desc = card->evtbd_ring[rdptr]; 1743 desc = card->evtbd_ring[rdptr];
1735 desc->paddr = buf_pa; 1744 desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
1736 desc->len = (u16)skb->len; 1745 desc->len = (u16)skb->len;
1737 desc->flags = 0; 1746 desc->flags = 0;
1738 skb = NULL; 1747 skb = NULL;
@@ -1782,7 +1791,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
1782 struct sk_buff *skb; 1791 struct sk_buff *skb;
1783 u32 txlen, tx_blocks = 0, tries, len; 1792 u32 txlen, tx_blocks = 0, tries, len;
1784 u32 block_retry_cnt = 0; 1793 u32 block_retry_cnt = 0;
1785 dma_addr_t buf_pa;
1786 struct pcie_service_card *card = adapter->card; 1794 struct pcie_service_card *card = adapter->card;
1787 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; 1795 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
1788 1796
@@ -1880,8 +1888,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
1880 goto done; 1888 goto done;
1881 } 1889 }
1882 1890
1883 MWIFIEX_SKB_PACB(skb, &buf_pa);
1884
1885 /* Wait for the command done interrupt */ 1891 /* Wait for the command done interrupt */
1886 do { 1892 do {
1887 if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS, 1893 if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1889,16 +1895,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
1889 dev_err(adapter->dev, "%s: Failed to read " 1895 dev_err(adapter->dev, "%s: Failed to read "
1890 "interrupt status during fw dnld.\n", 1896 "interrupt status during fw dnld.\n",
1891 __func__); 1897 __func__);
1892 pci_unmap_single(card->dev, buf_pa, skb->len, 1898 mwifiex_unmap_pci_memory(adapter, skb,
1893 PCI_DMA_TODEVICE); 1899 PCI_DMA_TODEVICE);
1894 ret = -1; 1900 ret = -1;
1895 goto done; 1901 goto done;
1896 } 1902 }
1897 } while ((ireg_intr & CPU_INTR_DOOR_BELL) == 1903 } while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
1898 CPU_INTR_DOOR_BELL); 1904 CPU_INTR_DOOR_BELL);
1899 1905
1900 pci_unmap_single(card->dev, buf_pa, skb->len, 1906 mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
1901 PCI_DMA_TODEVICE);
1902 1907
1903 offset += txlen; 1908 offset += txlen;
1904 } while (true); 1909 } while (true);
@@ -2338,6 +2343,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
2338 } 2343 }
2339 2344
2340 adapter->dev = &pdev->dev; 2345 adapter->dev = &pdev->dev;
2346 adapter->tx_buf_size = card->pcie.tx_buf_size;
2341 strcpy(adapter->fw_name, card->pcie.firmware); 2347 strcpy(adapter->fw_name, card->pcie.firmware);
2342 2348
2343 return 0; 2349 return 0;
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index d322ab8604ea..e8ec561f8a64 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -97,6 +97,8 @@
97#define MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD 256 97#define MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD 256
98/* FW awake cookie after FW ready */ 98/* FW awake cookie after FW ready */
99#define FW_AWAKE_COOKIE (0xAA55AA55) 99#define FW_AWAKE_COOKIE (0xAA55AA55)
100#define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF
101#define MWIFIEX_MAX_DELAY_COUNT 5
100 102
101struct mwifiex_pcie_card_reg { 103struct mwifiex_pcie_card_reg {
102 u16 cmd_addr_lo; 104 u16 cmd_addr_lo;
@@ -195,18 +197,21 @@ struct mwifiex_pcie_device {
195 const char *firmware; 197 const char *firmware;
196 const struct mwifiex_pcie_card_reg *reg; 198 const struct mwifiex_pcie_card_reg *reg;
197 u16 blksz_fw_dl; 199 u16 blksz_fw_dl;
200 u16 tx_buf_size;
198}; 201};
199 202
200static const struct mwifiex_pcie_device mwifiex_pcie8766 = { 203static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
201 .firmware = PCIE8766_DEFAULT_FW_NAME, 204 .firmware = PCIE8766_DEFAULT_FW_NAME,
202 .reg = &mwifiex_reg_8766, 205 .reg = &mwifiex_reg_8766,
203 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 206 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
207 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
204}; 208};
205 209
206static const struct mwifiex_pcie_device mwifiex_pcie8897 = { 210static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
207 .firmware = PCIE8897_DEFAULT_FW_NAME, 211 .firmware = PCIE8897_DEFAULT_FW_NAME,
208 .reg = &mwifiex_reg_8897, 212 .reg = &mwifiex_reg_8897,
209 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 213 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
214 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
210}; 215};
211 216
212struct mwifiex_evt_buf_desc { 217struct mwifiex_evt_buf_desc {
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 668547c2de84..7b3af3d29ded 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -591,11 +591,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
591 *chan_tlv_out, 591 *chan_tlv_out,
592 struct mwifiex_chan_scan_param_set *scan_chan_list) 592 struct mwifiex_chan_scan_param_set *scan_chan_list)
593{ 593{
594 struct mwifiex_adapter *adapter = priv->adapter;
594 int ret = 0; 595 int ret = 0;
595 struct mwifiex_chan_scan_param_set *tmp_chan_list; 596 struct mwifiex_chan_scan_param_set *tmp_chan_list;
596 struct mwifiex_chan_scan_param_set *start_chan; 597 struct mwifiex_chan_scan_param_set *start_chan;
597 598 struct cmd_ctrl_node *cmd_node, *tmp_node;
598 u32 tlv_idx, rates_size; 599 unsigned long flags;
600 u32 tlv_idx, rates_size, cmd_no;
599 u32 total_scan_time; 601 u32 total_scan_time;
600 u32 done_early; 602 u32 done_early;
601 u8 radio_type; 603 u8 radio_type;
@@ -733,9 +735,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
733 735
734 /* Send the scan command to the firmware with the specified 736 /* Send the scan command to the firmware with the specified
735 cfg */ 737 cfg */
736 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN, 738 if (priv->adapter->ext_scan)
737 HostCmd_ACT_GEN_SET, 0, 739 cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
738 scan_cfg_out); 740 else
741 cmd_no = HostCmd_CMD_802_11_SCAN;
742
743 ret = mwifiex_send_cmd(priv, cmd_no, HostCmd_ACT_GEN_SET,
744 0, scan_cfg_out, false);
739 745
740 /* rate IE is updated per scan command but same starting 746 /* rate IE is updated per scan command but same starting
741 * pointer is used each time so that rate IE from earlier 747 * pointer is used each time so that rate IE from earlier
@@ -744,8 +750,19 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
744 scan_cfg_out->tlv_buf_len -= 750 scan_cfg_out->tlv_buf_len -=
745 sizeof(struct mwifiex_ie_types_header) + rates_size; 751 sizeof(struct mwifiex_ie_types_header) + rates_size;
746 752
747 if (ret) 753 if (ret) {
754 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
755 list_for_each_entry_safe(cmd_node, tmp_node,
756 &adapter->scan_pending_q,
757 list) {
758 list_del(&cmd_node->list);
759 cmd_node->wait_q_enabled = false;
760 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
761 }
762 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
763 flags);
748 break; 764 break;
765 }
749 } 766 }
750 767
751 if (ret) 768 if (ret)
@@ -786,6 +803,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
786 struct mwifiex_adapter *adapter = priv->adapter; 803 struct mwifiex_adapter *adapter = priv->adapter;
787 struct mwifiex_ie_types_num_probes *num_probes_tlv; 804 struct mwifiex_ie_types_num_probes *num_probes_tlv;
788 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; 805 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
806 struct mwifiex_ie_types_bssid_list *bssid_tlv;
789 u8 *tlv_pos; 807 u8 *tlv_pos;
790 u32 num_probes; 808 u32 num_probes;
791 u32 ssid_len; 809 u32 ssid_len;
@@ -848,6 +866,17 @@ mwifiex_config_scan(struct mwifiex_private *priv,
848 user_scan_in->specific_bssid, 866 user_scan_in->specific_bssid,
849 sizeof(scan_cfg_out->specific_bssid)); 867 sizeof(scan_cfg_out->specific_bssid));
850 868
869 if (adapter->ext_scan &&
870 !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
871 bssid_tlv =
872 (struct mwifiex_ie_types_bssid_list *)tlv_pos;
873 bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
874 bssid_tlv->header.len = cpu_to_le16(ETH_ALEN);
875 memcpy(bssid_tlv->bssid, user_scan_in->specific_bssid,
876 ETH_ALEN);
877 tlv_pos += sizeof(struct mwifiex_ie_types_bssid_list);
878 }
879
851 for (i = 0; i < user_scan_in->num_ssids; i++) { 880 for (i = 0; i < user_scan_in->num_ssids; i++) {
852 ssid_len = user_scan_in->ssid_list[i].ssid_len; 881 ssid_len = user_scan_in->ssid_list[i].ssid_len;
853 882
@@ -941,7 +970,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
941 cpu_to_le16(sizeof(struct ieee80211_ht_cap)); 970 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
942 radio_type = 971 radio_type =
943 mwifiex_band_to_radio_type(priv->adapter->config_bands); 972 mwifiex_band_to_radio_type(priv->adapter->config_bands);
944 mwifiex_fill_cap_info(priv, radio_type, ht_cap); 973 mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
945 tlv_pos += sizeof(struct mwifiex_ie_types_htcap); 974 tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
946 } 975 }
947 976
@@ -1576,6 +1605,228 @@ done:
1576 return 0; 1605 return 0;
1577} 1606}
1578 1607
1608static int
1609mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
1610 u32 *bytes_left, u64 fw_tsf, u8 *radio_type,
1611 bool ext_scan, s32 rssi_val)
1612{
1613 struct mwifiex_adapter *adapter = priv->adapter;
1614 struct mwifiex_chan_freq_power *cfp;
1615 struct cfg80211_bss *bss;
1616 u8 bssid[ETH_ALEN];
1617 s32 rssi;
1618 const u8 *ie_buf;
1619 size_t ie_len;
1620 u16 channel = 0;
1621 u16 beacon_size = 0;
1622 u32 curr_bcn_bytes;
1623 u32 freq;
1624 u16 beacon_period;
1625 u16 cap_info_bitmap;
1626 u8 *current_ptr;
1627 u64 timestamp;
1628 struct mwifiex_fixed_bcn_param *bcn_param;
1629 struct mwifiex_bss_priv *bss_priv;
1630
1631 if (*bytes_left >= sizeof(beacon_size)) {
1632 /* Extract & convert beacon size from command buffer */
1633 memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
1634 *bytes_left -= sizeof(beacon_size);
1635 *bss_info += sizeof(beacon_size);
1636 }
1637
1638 if (!beacon_size || beacon_size > *bytes_left) {
1639 *bss_info += *bytes_left;
1640 *bytes_left = 0;
1641 return -EFAULT;
1642 }
1643
1644 /* Initialize the current working beacon pointer for this BSS
1645 * iteration
1646 */
1647 current_ptr = *bss_info;
1648
1649 /* Advance the return beacon pointer past the current beacon */
1650 *bss_info += beacon_size;
1651 *bytes_left -= beacon_size;
1652
1653 curr_bcn_bytes = beacon_size;
1654
1655 /* First 5 fields are bssid, RSSI(for legacy scan only),
1656 * time stamp, beacon interval, and capability information
1657 */
1658 if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
1659 sizeof(struct mwifiex_fixed_bcn_param)) {
1660 dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
1661 return -EFAULT;
1662 }
1663
1664 memcpy(bssid, current_ptr, ETH_ALEN);
1665 current_ptr += ETH_ALEN;
1666 curr_bcn_bytes -= ETH_ALEN;
1667
1668 if (!ext_scan) {
1669 rssi = (s32) *current_ptr;
1670 rssi = (-rssi) * 100; /* Convert dBm to mBm */
1671 current_ptr += sizeof(u8);
1672 curr_bcn_bytes -= sizeof(u8);
1673 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
1674 } else {
1675 rssi = rssi_val;
1676 }
1677
1678 bcn_param = (struct mwifiex_fixed_bcn_param *)current_ptr;
1679 current_ptr += sizeof(*bcn_param);
1680 curr_bcn_bytes -= sizeof(*bcn_param);
1681
1682 timestamp = le64_to_cpu(bcn_param->timestamp);
1683 beacon_period = le16_to_cpu(bcn_param->beacon_period);
1684
1685 cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
1686 dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
1687 cap_info_bitmap);
1688
1689 /* Rest of the current buffer are IE's */
1690 ie_buf = current_ptr;
1691 ie_len = curr_bcn_bytes;
1692 dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
1693 curr_bcn_bytes);
1694
1695 while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
1696 u8 element_id, element_len;
1697
1698 element_id = *current_ptr;
1699 element_len = *(current_ptr + 1);
1700 if (curr_bcn_bytes < element_len +
1701 sizeof(struct ieee_types_header)) {
1702 dev_err(adapter->dev,
1703 "%s: bytes left < IE length\n", __func__);
1704 return -EFAULT;
1705 }
1706 if (element_id == WLAN_EID_DS_PARAMS) {
1707 channel = *(current_ptr +
1708 sizeof(struct ieee_types_header));
1709 break;
1710 }
1711
1712 current_ptr += element_len + sizeof(struct ieee_types_header);
1713 curr_bcn_bytes -= element_len +
1714 sizeof(struct ieee_types_header);
1715 }
1716
1717 if (channel) {
1718 struct ieee80211_channel *chan;
1719 u8 band;
1720
1721 /* Skip entry if on csa closed channel */
1722 if (channel == priv->csa_chan) {
1723 dev_dbg(adapter->dev,
1724 "Dropping entry on csa closed channel\n");
1725 return 0;
1726 }
1727
1728 band = BAND_G;
1729 if (radio_type)
1730 band = mwifiex_radio_type_to_band(*radio_type &
1731 (BIT(0) | BIT(1)));
1732
1733 cfp = mwifiex_get_cfp(priv, band, channel, 0);
1734
1735 freq = cfp ? cfp->freq : 0;
1736
1737 chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
1738
1739 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
1740 bss = cfg80211_inform_bss(priv->wdev->wiphy,
1741 chan, bssid, timestamp,
1742 cap_info_bitmap, beacon_period,
1743 ie_buf, ie_len, rssi, GFP_KERNEL);
1744 bss_priv = (struct mwifiex_bss_priv *)bss->priv;
1745 bss_priv->band = band;
1746 bss_priv->fw_tsf = fw_tsf;
1747 if (priv->media_connected &&
1748 !memcmp(bssid, priv->curr_bss_params.bss_descriptor
1749 .mac_address, ETH_ALEN))
1750 mwifiex_update_curr_bss_params(priv, bss);
1751 cfg80211_put_bss(priv->wdev->wiphy, bss);
1752 }
1753 } else {
1754 dev_dbg(adapter->dev, "missing BSS channel IE\n");
1755 }
1756
1757 return 0;
1758}
1759
1760static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
1761{
1762 struct mwifiex_adapter *adapter = priv->adapter;
1763 struct cmd_ctrl_node *cmd_node;
1764 unsigned long flags;
1765
1766 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
1767 if (list_empty(&adapter->scan_pending_q)) {
1768 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
1769 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1770 adapter->scan_processing = false;
1771 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1772
1773 /* Need to indicate IOCTL complete */
1774 if (adapter->curr_cmd->wait_q_enabled) {
1775 adapter->cmd_wait_q.status = 0;
1776 if (!priv->scan_request) {
1777 dev_dbg(adapter->dev,
1778 "complete internal scan\n");
1779 mwifiex_complete_cmd(adapter,
1780 adapter->curr_cmd);
1781 }
1782 }
1783 if (priv->report_scan_result)
1784 priv->report_scan_result = false;
1785
1786 if (priv->scan_request) {
1787 dev_dbg(adapter->dev, "info: notifying scan done\n");
1788 cfg80211_scan_done(priv->scan_request, 0);
1789 priv->scan_request = NULL;
1790 } else {
1791 priv->scan_aborting = false;
1792 dev_dbg(adapter->dev, "info: scan already aborted\n");
1793 }
1794 } else {
1795 if ((priv->scan_aborting && !priv->scan_request) ||
1796 priv->scan_block) {
1797 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1798 flags);
1799 adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
1800 mod_timer(&priv->scan_delay_timer, jiffies);
1801 dev_dbg(priv->adapter->dev,
1802 "info: %s: triggerring scan abort\n", __func__);
1803 } else if (!mwifiex_wmm_lists_empty(adapter) &&
1804 (priv->scan_request && (priv->scan_request->flags &
1805 NL80211_SCAN_FLAG_LOW_PRIORITY))) {
1806 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1807 flags);
1808 adapter->scan_delay_cnt = 1;
1809 mod_timer(&priv->scan_delay_timer, jiffies +
1810 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
1811 dev_dbg(priv->adapter->dev,
1812 "info: %s: deferring scan\n", __func__);
1813 } else {
1814 /* Get scan command from scan_pending_q and put to
1815 * cmd_pending_q
1816 */
1817 cmd_node = list_first_entry(&adapter->scan_pending_q,
1818 struct cmd_ctrl_node, list);
1819 list_del(&cmd_node->list);
1820 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1821 flags);
1822 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1823 true);
1824 }
1825 }
1826
1827 return;
1828}
1829
1579/* 1830/*
1580 * This function handles the command response of scan. 1831 * This function handles the command response of scan.
1581 * 1832 *
@@ -1600,7 +1851,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1600{ 1851{
1601 int ret = 0; 1852 int ret = 0;
1602 struct mwifiex_adapter *adapter = priv->adapter; 1853 struct mwifiex_adapter *adapter = priv->adapter;
1603 struct cmd_ctrl_node *cmd_node;
1604 struct host_cmd_ds_802_11_scan_rsp *scan_rsp; 1854 struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
1605 struct mwifiex_ie_types_data *tlv_data; 1855 struct mwifiex_ie_types_data *tlv_data;
1606 struct mwifiex_ie_types_tsf_timestamp *tsf_tlv; 1856 struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
@@ -1609,12 +1859,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1609 u32 bytes_left; 1859 u32 bytes_left;
1610 u32 idx; 1860 u32 idx;
1611 u32 tlv_buf_size; 1861 u32 tlv_buf_size;
1612 struct mwifiex_chan_freq_power *cfp;
1613 struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv; 1862 struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
1614 struct chan_band_param_set *chan_band; 1863 struct chan_band_param_set *chan_band;
1615 u8 is_bgscan_resp; 1864 u8 is_bgscan_resp;
1616 unsigned long flags; 1865 __le64 fw_tsf = 0;
1617 struct cfg80211_bss *bss; 1866 u8 *radio_type;
1618 1867
1619 is_bgscan_resp = (le16_to_cpu(resp->command) 1868 is_bgscan_resp = (le16_to_cpu(resp->command)
1620 == HostCmd_CMD_802_11_BG_SCAN_QUERY); 1869 == HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -1676,220 +1925,194 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1676 &chan_band_tlv); 1925 &chan_band_tlv);
1677 1926
1678 for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) { 1927 for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
1679 u8 bssid[ETH_ALEN]; 1928 /*
1680 s32 rssi; 1929 * If the TSF TLV was appended to the scan results, save this
1681 const u8 *ie_buf; 1930 * entry's TSF value in the fw_tsf field. It is the firmware's
1682 size_t ie_len; 1931 * TSF value at the time the beacon or probe response was
1683 u16 channel = 0; 1932 * received.
1684 __le64 fw_tsf = 0; 1933 */
1685 u16 beacon_size = 0; 1934 if (tsf_tlv)
1686 u32 curr_bcn_bytes; 1935 memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
1687 u32 freq; 1936 sizeof(fw_tsf));
1688 u16 beacon_period;
1689 u16 cap_info_bitmap;
1690 u8 *current_ptr;
1691 u64 timestamp;
1692 struct mwifiex_bcn_param *bcn_param;
1693 struct mwifiex_bss_priv *bss_priv;
1694
1695 if (bytes_left >= sizeof(beacon_size)) {
1696 /* Extract & convert beacon size from command buffer */
1697 memcpy(&beacon_size, bss_info, sizeof(beacon_size));
1698 bytes_left -= sizeof(beacon_size);
1699 bss_info += sizeof(beacon_size);
1700 }
1701 1937
1702 if (!beacon_size || beacon_size > bytes_left) { 1938 if (chan_band_tlv) {
1703 bss_info += bytes_left; 1939 chan_band = &chan_band_tlv->chan_band_param[idx];
1704 bytes_left = 0; 1940 radio_type = &chan_band->radio_type;
1705 ret = -1; 1941 } else {
1706 goto check_next_scan; 1942 radio_type = NULL;
1707 } 1943 }
1708 1944
1709 /* Initialize the current working beacon pointer for this BSS 1945 ret = mwifiex_parse_single_response_buf(priv, &bss_info,
1710 * iteration */ 1946 &bytes_left,
1711 current_ptr = bss_info; 1947 le64_to_cpu(fw_tsf),
1948 radio_type, false, 0);
1949 if (ret)
1950 goto check_next_scan;
1951 }
1712 1952
1713 /* Advance the return beacon pointer past the current beacon */ 1953check_next_scan:
1714 bss_info += beacon_size; 1954 mwifiex_check_next_scan_command(priv);
1715 bytes_left -= beacon_size; 1955 return ret;
1956}
1716 1957
1717 curr_bcn_bytes = beacon_size; 1958/*
1959 * This function prepares an extended scan command to be sent to the firmware
1960 *
1961 * This uses the scan command configuration sent to the command processing
1962 * module in command preparation stage to configure a extended scan command
1963 * structure to send to firmware.
1964 */
1965int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
1966 struct host_cmd_ds_command *cmd,
1967 void *data_buf)
1968{
1969 struct host_cmd_ds_802_11_scan_ext *ext_scan = &cmd->params.ext_scan;
1970 struct mwifiex_scan_cmd_config *scan_cfg = data_buf;
1718 1971
1719 /* 1972 memcpy(ext_scan->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
1720 * First 5 fields are bssid, RSSI, time stamp, beacon interval,
1721 * and capability information
1722 */
1723 if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
1724 dev_err(adapter->dev,
1725 "InterpretIE: not enough bytes left\n");
1726 continue;
1727 }
1728 bcn_param = (struct mwifiex_bcn_param *)current_ptr;
1729 current_ptr += sizeof(*bcn_param);
1730 curr_bcn_bytes -= sizeof(*bcn_param);
1731 1973
1732 memcpy(bssid, bcn_param->bssid, ETH_ALEN); 1974 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
1733 1975
1734 rssi = (s32) bcn_param->rssi; 1976 /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
1735 rssi = (-rssi) * 100; /* Convert dBm to mBm */ 1977 cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved)
1736 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi); 1978 + scan_cfg->tlv_buf_len + S_DS_GEN));
1737 1979
1738 timestamp = le64_to_cpu(bcn_param->timestamp); 1980 return 0;
1739 beacon_period = le16_to_cpu(bcn_param->beacon_period); 1981}
1740 1982
1741 cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap); 1983/* This function handles the command response of extended scan */
1742 dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n", 1984int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
1743 cap_info_bitmap); 1985{
1986 dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
1987 return 0;
1988}
1744 1989
1745 /* Rest of the current buffer are IE's */ 1990/* This function This function handles the event extended scan report. It
1746 ie_buf = current_ptr; 1991 * parses extended scan results and informs to cfg80211 stack.
1747 ie_len = curr_bcn_bytes; 1992 */
1748 dev_dbg(adapter->dev, 1993int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
1749 "info: InterpretIE: IELength for this AP = %d\n", 1994 void *buf)
1750 curr_bcn_bytes); 1995{
1996 int ret = 0;
1997 struct mwifiex_adapter *adapter = priv->adapter;
1998 u8 *bss_info;
1999 u32 bytes_left, bytes_left_for_tlv, idx;
2000 u16 type, len;
2001 struct mwifiex_ie_types_data *tlv;
2002 struct mwifiex_ie_types_bss_scan_rsp *scan_rsp_tlv;
2003 struct mwifiex_ie_types_bss_scan_info *scan_info_tlv;
2004 u8 *radio_type;
2005 u64 fw_tsf = 0;
2006 s32 rssi = 0;
2007 struct mwifiex_event_scan_result *event_scan = buf;
2008 u8 num_of_set = event_scan->num_of_set;
2009 u8 *scan_resp = buf + sizeof(struct mwifiex_event_scan_result);
2010 u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
2011
2012 if (num_of_set > MWIFIEX_MAX_AP) {
2013 dev_err(adapter->dev,
2014 "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
2015 num_of_set);
2016 ret = -1;
2017 goto check_next_scan;
2018 }
1751 2019
1752 while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) { 2020 bytes_left = scan_resp_size;
1753 u8 element_id, element_len; 2021 dev_dbg(adapter->dev,
2022 "EXT_SCAN: size %d, returned %d APs...",
2023 scan_resp_size, num_of_set);
1754 2024
1755 element_id = *current_ptr; 2025 tlv = (struct mwifiex_ie_types_data *)scan_resp;
1756 element_len = *(current_ptr + 1);
1757 if (curr_bcn_bytes < element_len +
1758 sizeof(struct ieee_types_header)) {
1759 dev_err(priv->adapter->dev,
1760 "%s: bytes left < IE length\n",
1761 __func__);
1762 goto check_next_scan;
1763 }
1764 if (element_id == WLAN_EID_DS_PARAMS) {
1765 channel = *(current_ptr + sizeof(struct ieee_types_header));
1766 break;
1767 }
1768 2026
1769 current_ptr += element_len + 2027 for (idx = 0; idx < num_of_set && bytes_left; idx++) {
1770 sizeof(struct ieee_types_header); 2028 type = le16_to_cpu(tlv->header.type);
1771 curr_bcn_bytes -= element_len + 2029 len = le16_to_cpu(tlv->header.len);
1772 sizeof(struct ieee_types_header); 2030 if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
2031 dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
2032 break;
1773 } 2033 }
2034 scan_rsp_tlv = NULL;
2035 scan_info_tlv = NULL;
2036 bytes_left_for_tlv = bytes_left;
1774 2037
1775 /* 2038 /* BSS response TLV with beacon or probe response buffer
1776 * If the TSF TLV was appended to the scan results, save this 2039 * at the initial position of each descriptor
1777 * entry's TSF value in the fw_tsf field. It is the firmware's
1778 * TSF value at the time the beacon or probe response was
1779 * received.
1780 */ 2040 */
1781 if (tsf_tlv) 2041 if (type != TLV_TYPE_BSS_SCAN_RSP)
1782 memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE], 2042 break;
1783 sizeof(fw_tsf));
1784
1785 if (channel) {
1786 struct ieee80211_channel *chan;
1787 u8 band;
1788 2043
1789 /* Skip entry if on csa closed channel */ 2044 bss_info = (u8 *)tlv;
1790 if (channel == priv->csa_chan) { 2045 scan_rsp_tlv = (struct mwifiex_ie_types_bss_scan_rsp *)tlv;
1791 dev_dbg(adapter->dev, 2046 tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
1792 "Dropping entry on csa closed channel\n"); 2047 bytes_left_for_tlv -=
2048 (len + sizeof(struct mwifiex_ie_types_header));
2049
2050 while (bytes_left_for_tlv >=
2051 sizeof(struct mwifiex_ie_types_header) &&
2052 le16_to_cpu(tlv->header.type) != TLV_TYPE_BSS_SCAN_RSP) {
2053 type = le16_to_cpu(tlv->header.type);
2054 len = le16_to_cpu(tlv->header.len);
2055 if (bytes_left_for_tlv <
2056 sizeof(struct mwifiex_ie_types_header) + len) {
2057 dev_err(adapter->dev,
2058 "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
2059 scan_rsp_tlv = NULL;
2060 bytes_left_for_tlv = 0;
1793 continue; 2061 continue;
1794 } 2062 }
1795 2063 switch (type) {
1796 band = BAND_G; 2064 case TLV_TYPE_BSS_SCAN_INFO:
1797 if (chan_band_tlv) { 2065 scan_info_tlv =
1798 chan_band = 2066 (struct mwifiex_ie_types_bss_scan_info *)tlv;
1799 &chan_band_tlv->chan_band_param[idx]; 2067 if (len !=
1800 band = mwifiex_radio_type_to_band( 2068 sizeof(struct mwifiex_ie_types_bss_scan_info) -
1801 chan_band->radio_type 2069 sizeof(struct mwifiex_ie_types_header)) {
1802 & (BIT(0) | BIT(1))); 2070 bytes_left_for_tlv = 0;
1803 } 2071 continue;
1804 2072 }
1805 cfp = mwifiex_get_cfp(priv, band, channel, 0); 2073 break;
1806 2074 default:
1807 freq = cfp ? cfp->freq : 0; 2075 break;
1808
1809 chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
1810
1811 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
1812 bss = cfg80211_inform_bss(priv->wdev->wiphy,
1813 chan, bssid, timestamp,
1814 cap_info_bitmap, beacon_period,
1815 ie_buf, ie_len, rssi, GFP_KERNEL);
1816 bss_priv = (struct mwifiex_bss_priv *)bss->priv;
1817 bss_priv->band = band;
1818 bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
1819 if (priv->media_connected &&
1820 !memcmp(bssid,
1821 priv->curr_bss_params.bss_descriptor
1822 .mac_address, ETH_ALEN))
1823 mwifiex_update_curr_bss_params(priv,
1824 bss);
1825 cfg80211_put_bss(priv->wdev->wiphy, bss);
1826 } 2076 }
1827 } else { 2077 tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
1828 dev_dbg(adapter->dev, "missing BSS channel IE\n"); 2078 bytes_left -=
2079 (len + sizeof(struct mwifiex_ie_types_header));
2080 bytes_left_for_tlv -=
2081 (len + sizeof(struct mwifiex_ie_types_header));
1829 } 2082 }
1830 }
1831 2083
1832check_next_scan: 2084 if (!scan_rsp_tlv)
1833 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 2085 break;
1834 if (list_empty(&adapter->scan_pending_q)) {
1835 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
1836 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1837 adapter->scan_processing = false;
1838 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1839 2086
1840 /* Need to indicate IOCTL complete */ 2087 /* Advance pointer to the beacon buffer length and
1841 if (adapter->curr_cmd->wait_q_enabled) { 2088 * update the bytes count so that the function
1842 adapter->cmd_wait_q.status = 0; 2089 * wlan_interpret_bss_desc_with_ie() can handle the
1843 if (!priv->scan_request) { 2090 * scan buffer withut any change
1844 dev_dbg(adapter->dev, 2091 */
1845 "complete internal scan\n"); 2092 bss_info += sizeof(u16);
1846 mwifiex_complete_cmd(adapter, 2093 bytes_left -= sizeof(u16);
1847 adapter->curr_cmd);
1848 }
1849 }
1850 if (priv->report_scan_result)
1851 priv->report_scan_result = false;
1852 2094
1853 if (priv->scan_request) { 2095 if (scan_info_tlv) {
1854 dev_dbg(adapter->dev, "info: notifying scan done\n"); 2096 rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
1855 cfg80211_scan_done(priv->scan_request, 0); 2097 rssi *= 100; /* Convert dBm to mBm */
1856 priv->scan_request = NULL; 2098 dev_dbg(adapter->dev,
1857 } else { 2099 "info: InterpretIE: RSSI=%d\n", rssi);
1858 priv->scan_aborting = false; 2100 fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
1859 dev_dbg(adapter->dev, "info: scan already aborted\n"); 2101 radio_type = &scan_info_tlv->radio_type;
1860 }
1861 } else {
1862 if ((priv->scan_aborting && !priv->scan_request) ||
1863 priv->scan_block) {
1864 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1865 flags);
1866 adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
1867 mod_timer(&priv->scan_delay_timer, jiffies);
1868 dev_dbg(priv->adapter->dev,
1869 "info: %s: triggerring scan abort\n", __func__);
1870 } else if (!mwifiex_wmm_lists_empty(adapter) &&
1871 (priv->scan_request && (priv->scan_request->flags &
1872 NL80211_SCAN_FLAG_LOW_PRIORITY))) {
1873 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1874 flags);
1875 adapter->scan_delay_cnt = 1;
1876 mod_timer(&priv->scan_delay_timer, jiffies +
1877 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
1878 dev_dbg(priv->adapter->dev,
1879 "info: %s: deferring scan\n", __func__);
1880 } else { 2102 } else {
1881 /* Get scan command from scan_pending_q and put to 2103 radio_type = NULL;
1882 cmd_pending_q */
1883 cmd_node = list_first_entry(&adapter->scan_pending_q,
1884 struct cmd_ctrl_node, list);
1885 list_del(&cmd_node->list);
1886 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1887 flags);
1888 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1889 true);
1890 } 2104 }
2105 ret = mwifiex_parse_single_response_buf(priv, &bss_info,
2106 &bytes_left, fw_tsf,
2107 radio_type, true, rssi);
2108 if (ret)
2109 goto check_next_scan;
1891 } 2110 }
1892 2111
2112check_next_scan:
2113 if (!event_scan->more_event)
2114 mwifiex_check_next_scan_command(priv);
2115
1893 return ret; 2116 return ret;
1894} 2117}
1895 2118
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index b44a31523461..d206f04d4994 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -84,6 +84,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
84 card->mp_agg_pkt_limit = data->mp_agg_pkt_limit; 84 card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
85 card->supports_sdio_new_mode = data->supports_sdio_new_mode; 85 card->supports_sdio_new_mode = data->supports_sdio_new_mode;
86 card->has_control_mask = data->has_control_mask; 86 card->has_control_mask = data->has_control_mask;
87 card->tx_buf_size = data->tx_buf_size;
87 } 88 }
88 89
89 sdio_claim_host(func); 90 sdio_claim_host(func);
@@ -165,7 +166,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
165 struct sdio_mmc_card *card; 166 struct sdio_mmc_card *card;
166 struct mwifiex_adapter *adapter; 167 struct mwifiex_adapter *adapter;
167 struct mwifiex_private *priv; 168 struct mwifiex_private *priv;
168 int i;
169 169
170 pr_debug("info: SDIO func num=%d\n", func->num); 170 pr_debug("info: SDIO func num=%d\n", func->num);
171 171
@@ -184,11 +184,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
184 if (adapter->is_suspended) 184 if (adapter->is_suspended)
185 mwifiex_sdio_resume(adapter->dev); 185 mwifiex_sdio_resume(adapter->dev);
186 186
187 for (i = 0; i < adapter->priv_num; i++) 187 mwifiex_deauthenticate_all(adapter);
188 if ((GET_BSS_ROLE(adapter->priv[i]) ==
189 MWIFIEX_BSS_ROLE_STA) &&
190 adapter->priv[i]->media_connected)
191 mwifiex_deauthenticate(adapter->priv[i], NULL);
192 188
193 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 189 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
194 mwifiex_disable_auto_ds(priv); 190 mwifiex_disable_auto_ds(priv);
@@ -241,6 +237,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
241 /* Enable the Host Sleep */ 237 /* Enable the Host Sleep */
242 if (!mwifiex_enable_hs(adapter)) { 238 if (!mwifiex_enable_hs(adapter)) {
243 dev_err(adapter->dev, "cmd: failed to suspend\n"); 239 dev_err(adapter->dev, "cmd: failed to suspend\n");
240 adapter->hs_enabling = false;
244 return -EFAULT; 241 return -EFAULT;
245 } 242 }
246 243
@@ -249,6 +246,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
249 246
250 /* Indicate device suspended */ 247 /* Indicate device suspended */
251 adapter->is_suspended = true; 248 adapter->is_suspended = true;
249 adapter->hs_enabling = false;
252 250
253 return ret; 251 return ret;
254} 252}
@@ -1760,6 +1758,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1760 1758
1761 /* save adapter pointer in card */ 1759 /* save adapter pointer in card */
1762 card->adapter = adapter; 1760 card->adapter = adapter;
1761 adapter->tx_buf_size = card->tx_buf_size;
1763 1762
1764 sdio_claim_host(func); 1763 sdio_claim_host(func);
1765 1764
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 532ae0ac4dfb..c71201b2e2a3 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -233,6 +233,7 @@ struct sdio_mmc_card {
233 u8 mp_agg_pkt_limit; 233 u8 mp_agg_pkt_limit;
234 bool supports_sdio_new_mode; 234 bool supports_sdio_new_mode;
235 bool has_control_mask; 235 bool has_control_mask;
236 u16 tx_buf_size;
236 237
237 u32 mp_rd_bitmap; 238 u32 mp_rd_bitmap;
238 u32 mp_wr_bitmap; 239 u32 mp_wr_bitmap;
@@ -256,6 +257,7 @@ struct mwifiex_sdio_device {
256 u8 mp_agg_pkt_limit; 257 u8 mp_agg_pkt_limit;
257 bool supports_sdio_new_mode; 258 bool supports_sdio_new_mode;
258 bool has_control_mask; 259 bool has_control_mask;
260 u16 tx_buf_size;
259}; 261};
260 262
261static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { 263static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -312,6 +314,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
312 .mp_agg_pkt_limit = 8, 314 .mp_agg_pkt_limit = 8,
313 .supports_sdio_new_mode = false, 315 .supports_sdio_new_mode = false,
314 .has_control_mask = true, 316 .has_control_mask = true,
317 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
315}; 318};
316 319
317static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { 320static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -321,6 +324,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
321 .mp_agg_pkt_limit = 8, 324 .mp_agg_pkt_limit = 8,
322 .supports_sdio_new_mode = false, 325 .supports_sdio_new_mode = false,
323 .has_control_mask = true, 326 .has_control_mask = true,
327 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
324}; 328};
325 329
326static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { 330static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -330,6 +334,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
330 .mp_agg_pkt_limit = 8, 334 .mp_agg_pkt_limit = 8,
331 .supports_sdio_new_mode = false, 335 .supports_sdio_new_mode = false,
332 .has_control_mask = true, 336 .has_control_mask = true,
337 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
333}; 338};
334 339
335static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { 340static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -339,6 +344,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
339 .mp_agg_pkt_limit = 16, 344 .mp_agg_pkt_limit = 16,
340 .supports_sdio_new_mode = true, 345 .supports_sdio_new_mode = true,
341 .has_control_mask = false, 346 .has_control_mask = false,
347 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
342}; 348};
343 349
344/* 350/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 9208a8816b80..e3cac1495cc7 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -185,6 +185,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
185 i++) 185 i++)
186 rate_scope->ht_mcs_rate_bitmap[i] = 186 rate_scope->ht_mcs_rate_bitmap[i] =
187 cpu_to_le16(pbitmap_rates[2 + i]); 187 cpu_to_le16(pbitmap_rates[2 + i]);
188 if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
189 for (i = 0;
190 i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
191 i++)
192 rate_scope->vht_mcs_rate_bitmap[i] =
193 cpu_to_le16(pbitmap_rates[10 + i]);
194 }
188 } else { 195 } else {
189 rate_scope->hr_dsss_rate_bitmap = 196 rate_scope->hr_dsss_rate_bitmap =
190 cpu_to_le16(priv->bitmap_rates[0]); 197 cpu_to_le16(priv->bitmap_rates[0]);
@@ -195,6 +202,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
195 i++) 202 i++)
196 rate_scope->ht_mcs_rate_bitmap[i] = 203 rate_scope->ht_mcs_rate_bitmap[i] =
197 cpu_to_le16(priv->bitmap_rates[2 + i]); 204 cpu_to_le16(priv->bitmap_rates[2 + i]);
205 if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
206 for (i = 0;
207 i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
208 i++)
209 rate_scope->vht_mcs_rate_bitmap[i] =
210 cpu_to_le16(priv->bitmap_rates[10 + i]);
211 }
198 } 212 }
199 213
200 rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope + 214 rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
@@ -532,8 +546,228 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
532 return 0; 546 return 0;
533} 547}
534 548
549/* This function populates key material v2 command
550 * to set network key for AES & CMAC AES.
551 */
552static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
553 struct host_cmd_ds_command *cmd,
554 struct mwifiex_ds_encrypt_key *enc_key,
555 struct host_cmd_ds_802_11_key_material_v2 *km)
556{
557 struct mwifiex_adapter *adapter = priv->adapter;
558 u16 size, len = KEY_PARAMS_FIXED_LEN;
559
560 if (enc_key->is_igtk_key) {
561 dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
562 if (enc_key->is_rx_seq_valid)
563 memcpy(km->key_param_set.key_params.cmac_aes.ipn,
564 enc_key->pn, enc_key->pn_len);
565 km->key_param_set.key_info &= cpu_to_le16(~KEY_MCAST);
566 km->key_param_set.key_info |= cpu_to_le16(KEY_IGTK);
567 km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC;
568 km->key_param_set.key_params.cmac_aes.key_len =
569 cpu_to_le16(enc_key->key_len);
570 memcpy(km->key_param_set.key_params.cmac_aes.key,
571 enc_key->key_material, enc_key->key_len);
572 len += sizeof(struct mwifiex_cmac_aes_param);
573 } else {
574 dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
575 if (enc_key->is_rx_seq_valid)
576 memcpy(km->key_param_set.key_params.aes.pn,
577 enc_key->pn, enc_key->pn_len);
578 km->key_param_set.key_type = KEY_TYPE_ID_AES;
579 km->key_param_set.key_params.aes.key_len =
580 cpu_to_le16(enc_key->key_len);
581 memcpy(km->key_param_set.key_params.aes.key,
582 enc_key->key_material, enc_key->key_len);
583 len += sizeof(struct mwifiex_aes_param);
584 }
585
586 km->key_param_set.len = cpu_to_le16(len);
587 size = len + sizeof(struct mwifiex_ie_types_header) +
588 sizeof(km->action) + S_DS_GEN;
589 cmd->size = cpu_to_le16(size);
590
591 return 0;
592}
593
594/* This function prepares command to set/get/reset network key(s).
595 * This function prepares key material command for V2 format.
596 * Preparation includes -
597 * - Setting command ID, action and proper size
598 * - Setting WEP keys, WAPI keys or WPA keys along with required
599 * encryption (TKIP, AES) (as required)
600 * - Ensuring correct endian-ness
601 */
602static int
603mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
604 struct host_cmd_ds_command *cmd,
605 u16 cmd_action, u32 cmd_oid,
606 struct mwifiex_ds_encrypt_key *enc_key)
607{
608 struct mwifiex_adapter *adapter = priv->adapter;
609 u8 *mac = enc_key->mac_addr;
610 u16 key_info, len = KEY_PARAMS_FIXED_LEN;
611 struct host_cmd_ds_802_11_key_material_v2 *km =
612 &cmd->params.key_material_v2;
613
614 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
615 km->action = cpu_to_le16(cmd_action);
616
617 if (cmd_action == HostCmd_ACT_GEN_GET) {
618 dev_dbg(adapter->dev, "%s: Get key\n", __func__);
619 km->key_param_set.key_idx =
620 enc_key->key_index & KEY_INDEX_MASK;
621 km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
622 km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
623 memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
624
625 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
626 key_info = KEY_UNICAST;
627 else
628 key_info = KEY_MCAST;
629
630 if (enc_key->is_igtk_key)
631 key_info |= KEY_IGTK;
632
633 km->key_param_set.key_info = cpu_to_le16(key_info);
634
635 cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
636 S_DS_GEN + KEY_PARAMS_FIXED_LEN +
637 sizeof(km->action));
638 return 0;
639 }
640
641 memset(&km->key_param_set, 0,
642 sizeof(struct mwifiex_ie_type_key_param_set_v2));
643
644 if (enc_key->key_disable) {
645 dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
646 km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
647 km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
648 km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
649 km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
650 key_info = KEY_MCAST | KEY_UNICAST;
651 km->key_param_set.key_info = cpu_to_le16(key_info);
652 memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
653 cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
654 S_DS_GEN + KEY_PARAMS_FIXED_LEN +
655 sizeof(km->action));
656 return 0;
657 }
658
659 km->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
660 km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
661 km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
662 key_info = KEY_ENABLED;
663 memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
664
665 if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
666 dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
667 len += sizeof(struct mwifiex_wep_param);
668 km->key_param_set.len = cpu_to_le16(len);
669 km->key_param_set.key_type = KEY_TYPE_ID_WEP;
670
671 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
672 key_info |= KEY_MCAST | KEY_UNICAST;
673 } else {
674 if (enc_key->is_current_wep_key) {
675 key_info |= KEY_MCAST | KEY_UNICAST;
676 if (km->key_param_set.key_idx ==
677 (priv->wep_key_curr_index & KEY_INDEX_MASK))
678 key_info |= KEY_DEFAULT;
679 } else {
680 if (mac) {
681 if (is_broadcast_ether_addr(mac))
682 key_info |= KEY_MCAST;
683 else
684 key_info |= KEY_UNICAST |
685 KEY_DEFAULT;
686 } else {
687 key_info |= KEY_MCAST;
688 }
689 }
690 }
691 km->key_param_set.key_info = cpu_to_le16(key_info);
692
693 km->key_param_set.key_params.wep.key_len =
694 cpu_to_le16(enc_key->key_len);
695 memcpy(km->key_param_set.key_params.wep.key,
696 enc_key->key_material, enc_key->key_len);
697
698 cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
699 len + sizeof(km->action) + S_DS_GEN);
700 return 0;
701 }
702
703 if (is_broadcast_ether_addr(mac))
704 key_info |= KEY_MCAST | KEY_RX_KEY;
705 else
706 key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
707
708 if (enc_key->is_wapi_key) {
709 dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
710 km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
711 memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
712 PN_LEN);
713 km->key_param_set.key_params.wapi.key_len =
714 cpu_to_le16(enc_key->key_len);
715 memcpy(km->key_param_set.key_params.wapi.key,
716 enc_key->key_material, enc_key->key_len);
717 if (is_broadcast_ether_addr(mac))
718 priv->sec_info.wapi_key_on = true;
719
720 if (!priv->sec_info.wapi_key_on)
721 key_info |= KEY_DEFAULT;
722 km->key_param_set.key_info = cpu_to_le16(key_info);
723
724 len += sizeof(struct mwifiex_wapi_param);
725 km->key_param_set.len = cpu_to_le16(len);
726 cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
727 len + sizeof(km->action) + S_DS_GEN);
728 return 0;
729 }
730
731 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
732 key_info |= KEY_DEFAULT;
733 /* Enable unicast bit for WPA-NONE/ADHOC_AES */
734 if (!priv->sec_info.wpa2_enabled &&
735 !is_broadcast_ether_addr(mac))
736 key_info |= KEY_UNICAST;
737 } else {
738 /* Enable default key for WPA/WPA2 */
739 if (!priv->wpa_is_gtk_set)
740 key_info |= KEY_DEFAULT;
741 }
742
743 km->key_param_set.key_info = cpu_to_le16(key_info);
744
745 if (enc_key->key_len == WLAN_KEY_LEN_CCMP)
746 return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
747
748 if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
749 dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
750 if (enc_key->is_rx_seq_valid)
751 memcpy(km->key_param_set.key_params.tkip.pn,
752 enc_key->pn, enc_key->pn_len);
753 km->key_param_set.key_type = KEY_TYPE_ID_TKIP;
754 km->key_param_set.key_params.tkip.key_len =
755 cpu_to_le16(enc_key->key_len);
756 memcpy(km->key_param_set.key_params.tkip.key,
757 enc_key->key_material, enc_key->key_len);
758
759 len += sizeof(struct mwifiex_tkip_param);
760 km->key_param_set.len = cpu_to_le16(len);
761 cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
762 len + sizeof(km->action) + S_DS_GEN);
763 }
764
765 return 0;
766}
767
535/* 768/*
536 * This function prepares command to set/get/reset network key(s). 769 * This function prepares command to set/get/reset network key(s).
770 * This function prepares key material command for V1 format.
537 * 771 *
538 * Preparation includes - 772 * Preparation includes -
539 * - Setting command ID, action and proper size 773 * - Setting command ID, action and proper size
@@ -542,10 +776,10 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
542 * - Ensuring correct endian-ness 776 * - Ensuring correct endian-ness
543 */ 777 */
544static int 778static int
545mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, 779mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
546 struct host_cmd_ds_command *cmd, 780 struct host_cmd_ds_command *cmd,
547 u16 cmd_action, u32 cmd_oid, 781 u16 cmd_action, u32 cmd_oid,
548 struct mwifiex_ds_encrypt_key *enc_key) 782 struct mwifiex_ds_encrypt_key *enc_key)
549{ 783{
550 struct host_cmd_ds_802_11_key_material *key_material = 784 struct host_cmd_ds_802_11_key_material *key_material =
551 &cmd->params.key_material; 785 &cmd->params.key_material;
@@ -724,6 +958,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
724 return ret; 958 return ret;
725} 959}
726 960
961/* Wrapper function for setting network key depending upon FW KEY API version */
962static int
963mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
964 struct host_cmd_ds_command *cmd,
965 u16 cmd_action, u32 cmd_oid,
966 struct mwifiex_ds_encrypt_key *enc_key)
967{
968 if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
969 return mwifiex_cmd_802_11_key_material_v2(priv, cmd,
970 cmd_action, cmd_oid,
971 enc_key);
972
973 else
974 return mwifiex_cmd_802_11_key_material_v1(priv, cmd,
975 cmd_action, cmd_oid,
976 enc_key);
977}
978
727/* 979/*
728 * This function prepares command to set/get 11d domain information. 980 * This function prepares command to set/get 11d domain information.
729 * 981 *
@@ -1173,9 +1425,9 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
1173 /* property header is 6 bytes, data must fit in cmd buffer */ 1425 /* property header is 6 bytes, data must fit in cmd buffer */
1174 if (prop && prop->value && prop->length > 6 && 1426 if (prop && prop->value && prop->length > 6 &&
1175 prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) { 1427 prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
1176 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA, 1428 ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
1177 HostCmd_ACT_GEN_SET, 0, 1429 HostCmd_ACT_GEN_SET, 0,
1178 prop); 1430 prop, true);
1179 if (ret) 1431 if (ret)
1180 return ret; 1432 return ret;
1181 } 1433 }
@@ -1280,6 +1532,127 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
1280 return 0; 1532 return 0;
1281} 1533}
1282 1534
1535static int
1536mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
1537 struct host_cmd_ds_command *cmd,
1538 void *data_buf)
1539{
1540 struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
1541 struct mwifiex_ds_tdls_oper *oper = data_buf;
1542 struct mwifiex_sta_node *sta_ptr;
1543 struct host_cmd_tlv_rates *tlv_rates;
1544 struct mwifiex_ie_types_htcap *ht_capab;
1545 struct mwifiex_ie_types_qos_info *wmm_qos_info;
1546 struct mwifiex_ie_types_extcap *extcap;
1547 struct mwifiex_ie_types_vhtcap *vht_capab;
1548 struct mwifiex_ie_types_aid *aid;
1549 u8 *pos, qos_info;
1550 u16 config_len = 0;
1551 struct station_parameters *params = priv->sta_params;
1552
1553 cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
1554 cmd->size = cpu_to_le16(S_DS_GEN);
1555 le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
1556
1557 tdls_oper->reason = 0;
1558 memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
1559 sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
1560
1561 pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
1562
1563 switch (oper->tdls_action) {
1564 case MWIFIEX_TDLS_DISABLE_LINK:
1565 tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_DELETE);
1566 break;
1567 case MWIFIEX_TDLS_CREATE_LINK:
1568 tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CREATE);
1569 break;
1570 case MWIFIEX_TDLS_CONFIG_LINK:
1571 tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
1572
1573 if (!params) {
1574 dev_err(priv->adapter->dev,
1575 "TDLS config params not available for %pM\n",
1576 oper->peer_mac);
1577 return -ENODATA;
1578 }
1579
1580 *(__le16 *)pos = cpu_to_le16(params->capability);
1581 config_len += sizeof(params->capability);
1582
1583 qos_info = params->uapsd_queues | (params->max_sp << 5);
1584 wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
1585 config_len);
1586 wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
1587 wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
1588 wmm_qos_info->qos_info = qos_info;
1589 config_len += sizeof(struct mwifiex_ie_types_qos_info);
1590
1591 if (params->ht_capa) {
1592 ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
1593 config_len);
1594 ht_capab->header.type =
1595 cpu_to_le16(WLAN_EID_HT_CAPABILITY);
1596 ht_capab->header.len =
1597 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
1598 memcpy(&ht_capab->ht_cap, params->ht_capa,
1599 sizeof(struct ieee80211_ht_cap));
1600 config_len += sizeof(struct mwifiex_ie_types_htcap);
1601 }
1602
1603 if (params->supported_rates && params->supported_rates_len) {
1604 tlv_rates = (struct host_cmd_tlv_rates *)(pos +
1605 config_len);
1606 tlv_rates->header.type =
1607 cpu_to_le16(WLAN_EID_SUPP_RATES);
1608 tlv_rates->header.len =
1609 cpu_to_le16(params->supported_rates_len);
1610 memcpy(tlv_rates->rates, params->supported_rates,
1611 params->supported_rates_len);
1612 config_len += sizeof(struct host_cmd_tlv_rates) +
1613 params->supported_rates_len;
1614 }
1615
1616 if (params->ext_capab && params->ext_capab_len) {
1617 extcap = (struct mwifiex_ie_types_extcap *)(pos +
1618 config_len);
1619 extcap->header.type =
1620 cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
1621 extcap->header.len = cpu_to_le16(params->ext_capab_len);
1622 memcpy(extcap->ext_capab, params->ext_capab,
1623 params->ext_capab_len);
1624 config_len += sizeof(struct mwifiex_ie_types_extcap) +
1625 params->ext_capab_len;
1626 }
1627 if (params->vht_capa) {
1628 vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
1629 config_len);
1630 vht_capab->header.type =
1631 cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
1632 vht_capab->header.len =
1633 cpu_to_le16(sizeof(struct ieee80211_vht_cap));
1634 memcpy(&vht_capab->vht_cap, params->vht_capa,
1635 sizeof(struct ieee80211_vht_cap));
1636 config_len += sizeof(struct mwifiex_ie_types_vhtcap);
1637 }
1638 if (params->aid) {
1639 aid = (struct mwifiex_ie_types_aid *)(pos + config_len);
1640 aid->header.type = cpu_to_le16(WLAN_EID_AID);
1641 aid->header.len = cpu_to_le16(sizeof(params->aid));
1642 aid->aid = cpu_to_le16(params->aid);
1643 config_len += sizeof(struct mwifiex_ie_types_aid);
1644 }
1645
1646 break;
1647 default:
1648 dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
1649 return -ENOTSUPP;
1650 }
1651
1652 le16_add_cpu(&cmd->size, config_len);
1653
1654 return 0;
1655}
1283/* 1656/*
1284 * This function prepares the commands before sending them to the firmware. 1657 * This function prepares the commands before sending them to the firmware.
1285 * 1658 *
@@ -1472,6 +1845,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1472 ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action, 1845 ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
1473 data_buf); 1846 data_buf);
1474 break; 1847 break;
1848 case HostCmd_CMD_802_11_SCAN_EXT:
1849 ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
1850 break;
1475 case HostCmd_CMD_MAC_REG_ACCESS: 1851 case HostCmd_CMD_MAC_REG_ACCESS:
1476 case HostCmd_CMD_BBP_REG_ACCESS: 1852 case HostCmd_CMD_BBP_REG_ACCESS:
1477 case HostCmd_CMD_RF_REG_ACCESS: 1853 case HostCmd_CMD_RF_REG_ACCESS:
@@ -1507,6 +1883,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1507 ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action, 1883 ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
1508 data_buf); 1884 data_buf);
1509 break; 1885 break;
1886 case HostCmd_CMD_TDLS_OPER:
1887 ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
1888 break;
1510 default: 1889 default:
1511 dev_err(priv->adapter->dev, 1890 dev_err(priv->adapter->dev,
1512 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1891 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1547,15 +1926,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1547 1926
1548 if (first_sta) { 1927 if (first_sta) {
1549 if (priv->adapter->iface_type == MWIFIEX_PCIE) { 1928 if (priv->adapter->iface_type == MWIFIEX_PCIE) {
1550 ret = mwifiex_send_cmd_sync(priv, 1929 ret = mwifiex_send_cmd(priv,
1551 HostCmd_CMD_PCIE_DESC_DETAILS, 1930 HostCmd_CMD_PCIE_DESC_DETAILS,
1552 HostCmd_ACT_GEN_SET, 0, NULL); 1931 HostCmd_ACT_GEN_SET, 0, NULL,
1932 true);
1553 if (ret) 1933 if (ret)
1554 return -1; 1934 return -1;
1555 } 1935 }
1556 1936
1557 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT, 1937 ret = mwifiex_send_cmd(priv, HostCmd_CMD_FUNC_INIT,
1558 HostCmd_ACT_GEN_SET, 0, NULL); 1938 HostCmd_ACT_GEN_SET, 0, NULL, true);
1559 if (ret) 1939 if (ret)
1560 return -1; 1940 return -1;
1561 1941
@@ -1573,55 +1953,57 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1573 } 1953 }
1574 1954
1575 if (adapter->cal_data) { 1955 if (adapter->cal_data) {
1576 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA, 1956 ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
1577 HostCmd_ACT_GEN_SET, 0, NULL); 1957 HostCmd_ACT_GEN_SET, 0, NULL,
1958 true);
1578 if (ret) 1959 if (ret)
1579 return -1; 1960 return -1;
1580 } 1961 }
1581 1962
1582 /* Read MAC address from HW */ 1963 /* Read MAC address from HW */
1583 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC, 1964 ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
1584 HostCmd_ACT_GEN_GET, 0, NULL); 1965 HostCmd_ACT_GEN_GET, 0, NULL, true);
1585 if (ret) 1966 if (ret)
1586 return -1; 1967 return -1;
1587 1968
1588 /* Reconfigure tx buf size */ 1969 /* Reconfigure tx buf size */
1589 ret = mwifiex_send_cmd_sync(priv, 1970 ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
1590 HostCmd_CMD_RECONFIGURE_TX_BUFF, 1971 HostCmd_ACT_GEN_SET, 0,
1591 HostCmd_ACT_GEN_SET, 0, 1972 &priv->adapter->tx_buf_size, true);
1592 &priv->adapter->tx_buf_size);
1593 if (ret) 1973 if (ret)
1594 return -1; 1974 return -1;
1595 1975
1596 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1976 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
1597 /* Enable IEEE PS by default */ 1977 /* Enable IEEE PS by default */
1598 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; 1978 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1599 ret = mwifiex_send_cmd_sync( 1979 ret = mwifiex_send_cmd(priv,
1600 priv, HostCmd_CMD_802_11_PS_MODE_ENH, 1980 HostCmd_CMD_802_11_PS_MODE_ENH,
1601 EN_AUTO_PS, BITMAP_STA_PS, NULL); 1981 EN_AUTO_PS, BITMAP_STA_PS, NULL,
1982 true);
1602 if (ret) 1983 if (ret)
1603 return -1; 1984 return -1;
1604 } 1985 }
1605 } 1986 }
1606 1987
1607 /* get tx rate */ 1988 /* get tx rate */
1608 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG, 1989 ret = mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
1609 HostCmd_ACT_GEN_GET, 0, NULL); 1990 HostCmd_ACT_GEN_GET, 0, NULL, true);
1610 if (ret) 1991 if (ret)
1611 return -1; 1992 return -1;
1612 priv->data_rate = 0; 1993 priv->data_rate = 0;
1613 1994
1614 /* get tx power */ 1995 /* get tx power */
1615 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR, 1996 ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR,
1616 HostCmd_ACT_GEN_GET, 0, NULL); 1997 HostCmd_ACT_GEN_GET, 0, NULL, true);
1617 if (ret) 1998 if (ret)
1618 return -1; 1999 return -1;
1619 2000
1620 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) { 2001 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
1621 /* set ibss coalescing_status */ 2002 /* set ibss coalescing_status */
1622 ret = mwifiex_send_cmd_sync( 2003 ret = mwifiex_send_cmd(
1623 priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, 2004 priv,
1624 HostCmd_ACT_GEN_SET, 0, &enable); 2005 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
2006 HostCmd_ACT_GEN_SET, 0, &enable, true);
1625 if (ret) 2007 if (ret)
1626 return -1; 2008 return -1;
1627 } 2009 }
@@ -1629,16 +2011,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1629 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl)); 2011 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
1630 amsdu_aggr_ctrl.enable = true; 2012 amsdu_aggr_ctrl.enable = true;
1631 /* Send request to firmware */ 2013 /* Send request to firmware */
1632 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL, 2014 ret = mwifiex_send_cmd(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
1633 HostCmd_ACT_GEN_SET, 0, 2015 HostCmd_ACT_GEN_SET, 0,
1634 &amsdu_aggr_ctrl); 2016 &amsdu_aggr_ctrl, true);
1635 if (ret) 2017 if (ret)
1636 return -1; 2018 return -1;
1637 /* MAC Control must be the last command in init_fw */ 2019 /* MAC Control must be the last command in init_fw */
1638 /* set MAC Control */ 2020 /* set MAC Control */
1639 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL, 2021 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1640 HostCmd_ACT_GEN_SET, 0, 2022 HostCmd_ACT_GEN_SET, 0,
1641 &priv->curr_pkt_filter); 2023 &priv->curr_pkt_filter, true);
1642 if (ret) 2024 if (ret)
1643 return -1; 2025 return -1;
1644 2026
@@ -1647,10 +2029,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1647 /* Enable auto deep sleep */ 2029 /* Enable auto deep sleep */
1648 auto_ds.auto_ds = DEEP_SLEEP_ON; 2030 auto_ds.auto_ds = DEEP_SLEEP_ON;
1649 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; 2031 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
1650 ret = mwifiex_send_cmd_sync(priv, 2032 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
1651 HostCmd_CMD_802_11_PS_MODE_ENH, 2033 EN_AUTO_PS, BITMAP_AUTO_DS,
1652 EN_AUTO_PS, BITMAP_AUTO_DS, 2034 &auto_ds, true);
1653 &auto_ds);
1654 if (ret) 2035 if (ret)
1655 return -1; 2036 return -1;
1656 } 2037 }
@@ -1658,9 +2039,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1658 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 2039 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
1659 /* Send cmd to FW to enable/disable 11D function */ 2040 /* Send cmd to FW to enable/disable 11D function */
1660 state_11d = ENABLE_11D; 2041 state_11d = ENABLE_11D;
1661 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, 2042 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
1662 HostCmd_ACT_GEN_SET, DOT11D_I, 2043 HostCmd_ACT_GEN_SET, DOT11D_I,
1663 &state_11d); 2044 &state_11d, true);
1664 if (ret) 2045 if (ret)
1665 dev_err(priv->adapter->dev, 2046 dev_err(priv->adapter->dev,
1666 "11D: failed to enable 11D\n"); 2047 "11D: failed to enable 11D\n");
@@ -1673,8 +2054,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1673 * (Short GI, Channel BW, Green field support etc.) for transmit 2054 * (Short GI, Channel BW, Green field support etc.) for transmit
1674 */ 2055 */
1675 tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG; 2056 tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
1676 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG, 2057 ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
1677 HostCmd_ACT_GEN_SET, 0, &tx_cfg); 2058 HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
1678 2059
1679 ret = -EINPROGRESS; 2060 ret = -EINPROGRESS;
1680 2061
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 24523e4015cb..bfebb0144df5 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -69,6 +69,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
69 69
70 break; 70 break;
71 case HostCmd_CMD_802_11_SCAN: 71 case HostCmd_CMD_802_11_SCAN:
72 case HostCmd_CMD_802_11_SCAN_EXT:
72 /* Cancel all pending scan command */ 73 /* Cancel all pending scan command */
73 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 74 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
74 list_for_each_entry_safe(cmd_node, tmp_node, 75 list_for_each_entry_safe(cmd_node, tmp_node,
@@ -157,8 +158,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
157 158
158 priv->subsc_evt_rssi_state = EVENT_HANDLED; 159 priv->subsc_evt_rssi_state = EVENT_HANDLED;
159 160
160 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT, 161 mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
161 0, 0, subsc_evt); 162 0, 0, subsc_evt, false);
162 163
163 return 0; 164 return 0;
164} 165}
@@ -303,6 +304,15 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
303 priv->bitmap_rates[2 + i] = 304 priv->bitmap_rates[2 + i] =
304 le16_to_cpu(rate_scope-> 305 le16_to_cpu(rate_scope->
305 ht_mcs_rate_bitmap[i]); 306 ht_mcs_rate_bitmap[i]);
307
308 if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
309 for (i = 0; i < ARRAY_SIZE(rate_scope->
310 vht_mcs_rate_bitmap);
311 i++)
312 priv->bitmap_rates[10 + i] =
313 le16_to_cpu(rate_scope->
314 vht_mcs_rate_bitmap[i]);
315 }
306 break; 316 break;
307 /* Add RATE_DROP tlv here */ 317 /* Add RATE_DROP tlv here */
308 } 318 }
@@ -316,9 +326,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
316 if (priv->is_data_rate_auto) 326 if (priv->is_data_rate_auto)
317 priv->data_rate = 0; 327 priv->data_rate = 0;
318 else 328 else
319 return mwifiex_send_cmd_async(priv, 329 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
320 HostCmd_CMD_802_11_TX_RATE_QUERY, 330 HostCmd_ACT_GEN_GET, 0, NULL, false);
321 HostCmd_ACT_GEN_GET, 0, NULL);
322 331
323 return 0; 332 return 0;
324} 333}
@@ -561,13 +570,13 @@ static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
561} 570}
562 571
563/* 572/*
564 * This function handles the command response of set/get key material. 573 * This function handles the command response of set/get v1 key material.
565 * 574 *
566 * Handling includes updating the driver parameters to reflect the 575 * Handling includes updating the driver parameters to reflect the
567 * changes. 576 * changes.
568 */ 577 */
569static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv, 578static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
570 struct host_cmd_ds_command *resp) 579 struct host_cmd_ds_command *resp)
571{ 580{
572 struct host_cmd_ds_802_11_key_material *key = 581 struct host_cmd_ds_802_11_key_material *key =
573 &resp->params.key_material; 582 &resp->params.key_material;
@@ -590,6 +599,51 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
590} 599}
591 600
592/* 601/*
602 * This function handles the command response of set/get v2 key material.
603 *
604 * Handling includes updating the driver parameters to reflect the
605 * changes.
606 */
607static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
608 struct host_cmd_ds_command *resp)
609{
610 struct host_cmd_ds_802_11_key_material_v2 *key_v2;
611 __le16 len;
612
613 key_v2 = &resp->params.key_material_v2;
614 if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
615 if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
616 dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
617 priv->wpa_is_gtk_set = true;
618 priv->scan_block = false;
619 }
620 }
621
622 if (key_v2->key_param_set.key_type != KEY_TYPE_ID_AES)
623 return 0;
624
625 memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
626 WLAN_KEY_LEN_CCMP);
627 priv->aes_key_v2.key_param_set.key_params.aes.key_len =
628 key_v2->key_param_set.key_params.aes.key_len;
629 len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
630 memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
631 key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
632
633 return 0;
634}
635
636/* Wrapper function for processing response of key material command */
637static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
638 struct host_cmd_ds_command *resp)
639{
640 if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
641 return mwifiex_ret_802_11_key_material_v2(priv, resp);
642 else
643 return mwifiex_ret_802_11_key_material_v1(priv, resp);
644}
645
646/*
593 * This function handles the command response of get 11d domain information. 647 * This function handles the command response of get 11d domain information.
594 */ 648 */
595static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv, 649static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
@@ -800,7 +854,60 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
800 854
801 return 0; 855 return 0;
802} 856}
857static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
858 struct host_cmd_ds_command *resp)
859{
860 struct host_cmd_ds_tdls_oper *cmd_tdls_oper = &resp->params.tdls_oper;
861 u16 reason = le16_to_cpu(cmd_tdls_oper->reason);
862 u16 action = le16_to_cpu(cmd_tdls_oper->tdls_action);
863 struct mwifiex_sta_node *node =
864 mwifiex_get_sta_entry(priv, cmd_tdls_oper->peer_mac);
803 865
866 switch (action) {
867 case ACT_TDLS_DELETE:
868 if (reason)
869 dev_err(priv->adapter->dev,
870 "TDLS link delete for %pM failed: reason %d\n",
871 cmd_tdls_oper->peer_mac, reason);
872 else
873 dev_dbg(priv->adapter->dev,
874 "TDLS link config for %pM successful\n",
875 cmd_tdls_oper->peer_mac);
876 break;
877 case ACT_TDLS_CREATE:
878 if (reason) {
879 dev_err(priv->adapter->dev,
880 "TDLS link creation for %pM failed: reason %d",
881 cmd_tdls_oper->peer_mac, reason);
882 if (node && reason != TDLS_ERR_LINK_EXISTS)
883 node->tdls_status = TDLS_SETUP_FAILURE;
884 } else {
885 dev_dbg(priv->adapter->dev,
886 "TDLS link creation for %pM successful",
887 cmd_tdls_oper->peer_mac);
888 }
889 break;
890 case ACT_TDLS_CONFIG:
891 if (reason) {
892 dev_err(priv->adapter->dev,
893 "TDLS link config for %pM failed, reason %d\n",
894 cmd_tdls_oper->peer_mac, reason);
895 if (node)
896 node->tdls_status = TDLS_SETUP_FAILURE;
897 } else {
898 dev_dbg(priv->adapter->dev,
899 "TDLS link config for %pM successful\n",
900 cmd_tdls_oper->peer_mac);
901 }
902 break;
903 default:
904 dev_err(priv->adapter->dev,
905 "Unknown TDLS command action respnse %d", action);
906 return -1;
907 }
908
909 return 0;
910}
804/* 911/*
805 * This function handles the command response for subscribe event command. 912 * This function handles the command response for subscribe event command.
806 */ 913 */
@@ -871,6 +978,10 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
871 ret = mwifiex_ret_802_11_scan(priv, resp); 978 ret = mwifiex_ret_802_11_scan(priv, resp);
872 adapter->curr_cmd->wait_q_enabled = false; 979 adapter->curr_cmd->wait_q_enabled = false;
873 break; 980 break;
981 case HostCmd_CMD_802_11_SCAN_EXT:
982 ret = mwifiex_ret_802_11_scan_ext(priv);
983 adapter->curr_cmd->wait_q_enabled = false;
984 break;
874 case HostCmd_CMD_802_11_BG_SCAN_QUERY: 985 case HostCmd_CMD_802_11_BG_SCAN_QUERY:
875 ret = mwifiex_ret_802_11_scan(priv, resp); 986 ret = mwifiex_ret_802_11_scan(priv, resp);
876 dev_dbg(adapter->dev, 987 dev_dbg(adapter->dev,
@@ -999,6 +1110,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
999 break; 1110 break;
1000 case HostCmd_CMD_COALESCE_CFG: 1111 case HostCmd_CMD_COALESCE_CFG:
1001 break; 1112 break;
1113 case HostCmd_CMD_TDLS_OPER:
1114 ret = mwifiex_ret_tdls_oper(priv, resp);
1115 break;
1002 default: 1116 default:
1003 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 1117 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
1004 resp->command); 1118 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8c351f71f72f..368450cc56c7 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
54 54
55 priv->scan_block = false; 55 priv->scan_block = false;
56 56
57 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
58 ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
59 mwifiex_disable_all_tdls_links(priv);
60
57 /* Free Tx and Rx packets, report disconnect to upper layer */ 61 /* Free Tx and Rx packets, report disconnect to upper layer */
58 mwifiex_clean_txrx(priv); 62 mwifiex_clean_txrx(priv);
59 63
@@ -112,7 +116,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
112 adapter->tx_lock_flag = false; 116 adapter->tx_lock_flag = false;
113 adapter->pps_uapsd_mode = false; 117 adapter->pps_uapsd_mode = false;
114 118
115 if (adapter->num_cmd_timeout && adapter->curr_cmd) 119 if (adapter->is_cmd_timedout && adapter->curr_cmd)
116 return; 120 return;
117 priv->media_connected = false; 121 priv->media_connected = false;
118 dev_dbg(adapter->dev, 122 dev_dbg(adapter->dev,
@@ -289,9 +293,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
289 293
290 case EVENT_HS_ACT_REQ: 294 case EVENT_HS_ACT_REQ:
291 dev_dbg(adapter->dev, "event: HS_ACT_REQ\n"); 295 dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
292 ret = mwifiex_send_cmd_async(priv, 296 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
293 HostCmd_CMD_802_11_HS_CFG_ENH, 297 0, 0, NULL, false);
294 0, 0, NULL);
295 break; 298 break;
296 299
297 case EVENT_MIC_ERR_UNICAST: 300 case EVENT_MIC_ERR_UNICAST:
@@ -322,27 +325,34 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
322 325
323 case EVENT_BG_SCAN_REPORT: 326 case EVENT_BG_SCAN_REPORT:
324 dev_dbg(adapter->dev, "event: BGS_REPORT\n"); 327 dev_dbg(adapter->dev, "event: BGS_REPORT\n");
325 ret = mwifiex_send_cmd_async(priv, 328 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
326 HostCmd_CMD_802_11_BG_SCAN_QUERY, 329 HostCmd_ACT_GEN_GET, 0, NULL, false);
327 HostCmd_ACT_GEN_GET, 0, NULL);
328 break; 330 break;
329 331
330 case EVENT_PORT_RELEASE: 332 case EVENT_PORT_RELEASE:
331 dev_dbg(adapter->dev, "event: PORT RELEASE\n"); 333 dev_dbg(adapter->dev, "event: PORT RELEASE\n");
332 break; 334 break;
333 335
336 case EVENT_EXT_SCAN_REPORT:
337 dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
338 if (adapter->ext_scan)
339 ret = mwifiex_handle_event_ext_scan_report(priv,
340 adapter->event_skb->data);
341
342 break;
343
334 case EVENT_WMM_STATUS_CHANGE: 344 case EVENT_WMM_STATUS_CHANGE:
335 dev_dbg(adapter->dev, "event: WMM status changed\n"); 345 dev_dbg(adapter->dev, "event: WMM status changed\n");
336 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS, 346 ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
337 0, 0, NULL); 347 0, 0, NULL, false);
338 break; 348 break;
339 349
340 case EVENT_RSSI_LOW: 350 case EVENT_RSSI_LOW:
341 cfg80211_cqm_rssi_notify(priv->netdev, 351 cfg80211_cqm_rssi_notify(priv->netdev,
342 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, 352 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
343 GFP_KERNEL); 353 GFP_KERNEL);
344 mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO, 354 mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
345 HostCmd_ACT_GEN_GET, 0, NULL); 355 HostCmd_ACT_GEN_GET, 0, NULL, false);
346 priv->subsc_evt_rssi_state = RSSI_LOW_RECVD; 356 priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
347 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); 357 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
348 break; 358 break;
@@ -356,8 +366,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
356 cfg80211_cqm_rssi_notify(priv->netdev, 366 cfg80211_cqm_rssi_notify(priv->netdev,
357 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, 367 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
358 GFP_KERNEL); 368 GFP_KERNEL);
359 mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO, 369 mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
360 HostCmd_ACT_GEN_GET, 0, NULL); 370 HostCmd_ACT_GEN_GET, 0, NULL, false);
361 priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD; 371 priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
362 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); 372 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
363 break; 373 break;
@@ -384,15 +394,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
384 break; 394 break;
385 case EVENT_IBSS_COALESCED: 395 case EVENT_IBSS_COALESCED:
386 dev_dbg(adapter->dev, "event: IBSS_COALESCED\n"); 396 dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
387 ret = mwifiex_send_cmd_async(priv, 397 ret = mwifiex_send_cmd(priv,
388 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, 398 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
389 HostCmd_ACT_GEN_GET, 0, NULL); 399 HostCmd_ACT_GEN_GET, 0, NULL, false);
390 break; 400 break;
391 case EVENT_ADDBA: 401 case EVENT_ADDBA:
392 dev_dbg(adapter->dev, "event: ADDBA Request\n"); 402 dev_dbg(adapter->dev, "event: ADDBA Request\n");
393 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP, 403 mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
394 HostCmd_ACT_GEN_SET, 0, 404 HostCmd_ACT_GEN_SET, 0,
395 adapter->event_body); 405 adapter->event_body, false);
396 break; 406 break;
397 case EVENT_DELBA: 407 case EVENT_DELBA:
398 dev_dbg(adapter->dev, "event: DELBA Request\n"); 408 dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -443,10 +453,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
443 priv->csa_expire_time = 453 priv->csa_expire_time =
444 jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME); 454 jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
445 priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel; 455 priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
446 ret = mwifiex_send_cmd_async(priv, 456 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
447 HostCmd_CMD_802_11_DEAUTHENTICATE,
448 HostCmd_ACT_GEN_SET, 0, 457 HostCmd_ACT_GEN_SET, 0,
449 priv->curr_bss_params.bss_descriptor.mac_address); 458 priv->curr_bss_params.bss_descriptor.mac_address,
459 false);
450 break; 460 break;
451 461
452 default: 462 default:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c5cb2ed19ec2..894270611f2c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -64,6 +64,7 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
64 *(cmd_queued->condition)); 64 *(cmd_queued->condition));
65 if (status) { 65 if (status) {
66 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); 66 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
67 mwifiex_cancel_all_pending_cmd(adapter);
67 return status; 68 return status;
68 } 69 }
69 70
@@ -108,19 +109,19 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
108 "info: Set multicast list=%d\n", 109 "info: Set multicast list=%d\n",
109 mcast_list->num_multicast_addr); 110 mcast_list->num_multicast_addr);
110 /* Send multicast addresses to firmware */ 111 /* Send multicast addresses to firmware */
111 ret = mwifiex_send_cmd_async(priv, 112 ret = mwifiex_send_cmd(priv,
112 HostCmd_CMD_MAC_MULTICAST_ADR, 113 HostCmd_CMD_MAC_MULTICAST_ADR,
113 HostCmd_ACT_GEN_SET, 0, 114 HostCmd_ACT_GEN_SET, 0,
114 mcast_list); 115 mcast_list, false);
115 } 116 }
116 } 117 }
117 dev_dbg(priv->adapter->dev, 118 dev_dbg(priv->adapter->dev,
118 "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n", 119 "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
119 old_pkt_filter, priv->curr_pkt_filter); 120 old_pkt_filter, priv->curr_pkt_filter);
120 if (old_pkt_filter != priv->curr_pkt_filter) { 121 if (old_pkt_filter != priv->curr_pkt_filter) {
121 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, 122 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
122 HostCmd_ACT_GEN_SET, 123 HostCmd_ACT_GEN_SET,
123 0, &priv->curr_pkt_filter); 124 0, &priv->curr_pkt_filter, false);
124 } 125 }
125 126
126 return ret; 127 return ret;
@@ -237,8 +238,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
237 238
238 rcu_read_unlock(); 239 rcu_read_unlock();
239 240
240 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, 241 if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
241 HostCmd_ACT_GEN_SET, 0, NULL)) { 242 HostCmd_ACT_GEN_SET, 0, NULL, false)) {
242 wiphy_err(priv->adapter->wiphy, 243 wiphy_err(priv->adapter->wiphy,
243 "11D: setting domain info in FW\n"); 244 "11D: setting domain info in FW\n");
244 return -1; 245 return -1;
@@ -290,7 +291,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
290 291
291 if (mwifiex_band_to_radio_type(bss_desc->bss_band) == 292 if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
292 HostCmd_SCAN_RADIO_TYPE_BG) 293 HostCmd_SCAN_RADIO_TYPE_BG)
293 config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC; 294 config_bands = BAND_B | BAND_G | BAND_GN;
294 else 295 else
295 config_bands = BAND_A | BAND_AN | BAND_AAC; 296 config_bands = BAND_A | BAND_AN | BAND_AAC;
296 297
@@ -429,16 +430,13 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
429 status = -1; 430 status = -1;
430 break; 431 break;
431 } 432 }
432 if (cmd_type == MWIFIEX_SYNC_CMD) 433
433 status = mwifiex_send_cmd_sync(priv, 434 status = mwifiex_send_cmd(priv,
434 HostCmd_CMD_802_11_HS_CFG_ENH, 435 HostCmd_CMD_802_11_HS_CFG_ENH,
435 HostCmd_ACT_GEN_SET, 0, 436 HostCmd_ACT_GEN_SET, 0,
436 &adapter->hs_cfg); 437 &adapter->hs_cfg,
437 else 438 cmd_type == MWIFIEX_SYNC_CMD);
438 status = mwifiex_send_cmd_async(priv, 439
439 HostCmd_CMD_802_11_HS_CFG_ENH,
440 HostCmd_ACT_GEN_SET, 0,
441 &adapter->hs_cfg);
442 if (hs_cfg->conditions == HS_CFG_CANCEL) 440 if (hs_cfg->conditions == HS_CFG_CANCEL)
443 /* Restore previous condition */ 441 /* Restore previous condition */
444 adapter->hs_cfg.conditions = 442 adapter->hs_cfg.conditions =
@@ -511,6 +509,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
511 memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg)); 509 memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
512 hscfg.is_invoke_hostcmd = true; 510 hscfg.is_invoke_hostcmd = true;
513 511
512 adapter->hs_enabling = true;
513 mwifiex_cancel_all_pending_cmd(adapter);
514
514 if (mwifiex_set_hs_params(mwifiex_get_priv(adapter, 515 if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
515 MWIFIEX_BSS_ROLE_STA), 516 MWIFIEX_BSS_ROLE_STA),
516 HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, 517 HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
@@ -519,8 +520,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
519 return false; 520 return false;
520 } 521 }
521 522
522 if (wait_event_interruptible(adapter->hs_activate_wait_q, 523 if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
523 adapter->hs_activate_wait_q_woken)) { 524 adapter->hs_activate_wait_q_woken,
525 (10 * HZ)) <= 0) {
524 dev_err(adapter->dev, "hs_activate_wait_q terminated\n"); 526 dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
525 return false; 527 return false;
526 } 528 }
@@ -586,8 +588,8 @@ int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
586 588
587 auto_ds.auto_ds = DEEP_SLEEP_OFF; 589 auto_ds.auto_ds = DEEP_SLEEP_OFF;
588 590
589 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH, 591 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
590 DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds); 592 DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true);
591} 593}
592EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds); 594EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
593 595
@@ -601,8 +603,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate)
601{ 603{
602 int ret; 604 int ret;
603 605
604 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY, 606 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
605 HostCmd_ACT_GEN_GET, 0, NULL); 607 HostCmd_ACT_GEN_GET, 0, NULL, true);
606 608
607 if (!ret) { 609 if (!ret) {
608 if (priv->is_data_rate_auto) 610 if (priv->is_data_rate_auto)
@@ -698,8 +700,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
698 pg->power_max = (s8) dbm; 700 pg->power_max = (s8) dbm;
699 pg->ht_bandwidth = HT_BW_40; 701 pg->ht_bandwidth = HT_BW_40;
700 } 702 }
701 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG, 703 ret = mwifiex_send_cmd(priv, HostCmd_CMD_TXPWR_CFG,
702 HostCmd_ACT_GEN_SET, 0, buf); 704 HostCmd_ACT_GEN_SET, 0, buf, true);
703 705
704 kfree(buf); 706 kfree(buf);
705 return ret; 707 return ret;
@@ -722,12 +724,11 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
722 else 724 else
723 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM; 725 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
724 sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS; 726 sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
725 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH, 727 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
726 sub_cmd, BITMAP_STA_PS, NULL); 728 sub_cmd, BITMAP_STA_PS, NULL, true);
727 if ((!ret) && (sub_cmd == DIS_AUTO_PS)) 729 if ((!ret) && (sub_cmd == DIS_AUTO_PS))
728 ret = mwifiex_send_cmd_async(priv, 730 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
729 HostCmd_CMD_802_11_PS_MODE_ENH, 731 GET_PS, 0, NULL, false);
730 GET_PS, 0, NULL);
731 732
732 return ret; 733 return ret;
733} 734}
@@ -851,9 +852,9 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
851 struct mwifiex_ds_encrypt_key *encrypt_key) 852 struct mwifiex_ds_encrypt_key *encrypt_key)
852{ 853{
853 854
854 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL, 855 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
855 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED, 856 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
856 encrypt_key); 857 encrypt_key, true);
857} 858}
858 859
859/* 860/*
@@ -865,6 +866,7 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
865static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, 866static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
866 struct mwifiex_ds_encrypt_key *encrypt_key) 867 struct mwifiex_ds_encrypt_key *encrypt_key)
867{ 868{
869 struct mwifiex_adapter *adapter = priv->adapter;
868 int ret; 870 int ret;
869 struct mwifiex_wep_key *wep_key; 871 struct mwifiex_wep_key *wep_key;
870 int index; 872 int index;
@@ -879,10 +881,17 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
879 /* Copy the required key as the current key */ 881 /* Copy the required key as the current key */
880 wep_key = &priv->wep_key[index]; 882 wep_key = &priv->wep_key[index];
881 if (!wep_key->key_length) { 883 if (!wep_key->key_length) {
882 dev_err(priv->adapter->dev, 884 dev_err(adapter->dev,
883 "key not set, so cannot enable it\n"); 885 "key not set, so cannot enable it\n");
884 return -1; 886 return -1;
885 } 887 }
888
889 if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) {
890 memcpy(encrypt_key->key_material,
891 wep_key->key_material, wep_key->key_length);
892 encrypt_key->key_len = wep_key->key_length;
893 }
894
886 priv->wep_key_curr_index = (u16) index; 895 priv->wep_key_curr_index = (u16) index;
887 priv->sec_info.wep_enabled = 1; 896 priv->sec_info.wep_enabled = 1;
888 } else { 897 } else {
@@ -897,21 +906,32 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
897 priv->sec_info.wep_enabled = 1; 906 priv->sec_info.wep_enabled = 1;
898 } 907 }
899 if (wep_key->key_length) { 908 if (wep_key->key_length) {
909 void *enc_key;
910
911 if (encrypt_key->key_disable)
912 memset(&priv->wep_key[index], 0,
913 sizeof(struct mwifiex_wep_key));
914
915 if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
916 enc_key = encrypt_key;
917 else
918 enc_key = NULL;
919
900 /* Send request to firmware */ 920 /* Send request to firmware */
901 ret = mwifiex_send_cmd_async(priv, 921 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
902 HostCmd_CMD_802_11_KEY_MATERIAL, 922 HostCmd_ACT_GEN_SET, 0, enc_key, false);
903 HostCmd_ACT_GEN_SET, 0, NULL);
904 if (ret) 923 if (ret)
905 return ret; 924 return ret;
906 } 925 }
926
907 if (priv->sec_info.wep_enabled) 927 if (priv->sec_info.wep_enabled)
908 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE; 928 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
909 else 929 else
910 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE; 930 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
911 931
912 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL, 932 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
913 HostCmd_ACT_GEN_SET, 0, 933 HostCmd_ACT_GEN_SET, 0,
914 &priv->curr_pkt_filter); 934 &priv->curr_pkt_filter, true);
915 935
916 return ret; 936 return ret;
917} 937}
@@ -946,10 +966,9 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
946 */ 966 */
947 /* Send the key as PTK to firmware */ 967 /* Send the key as PTK to firmware */
948 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST; 968 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
949 ret = mwifiex_send_cmd_async(priv, 969 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
950 HostCmd_CMD_802_11_KEY_MATERIAL, 970 HostCmd_ACT_GEN_SET,
951 HostCmd_ACT_GEN_SET, 971 KEY_INFO_ENABLED, encrypt_key, false);
952 KEY_INFO_ENABLED, encrypt_key);
953 if (ret) 972 if (ret)
954 return ret; 973 return ret;
955 974
@@ -973,15 +992,13 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
973 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST; 992 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
974 993
975 if (remove_key) 994 if (remove_key)
976 ret = mwifiex_send_cmd_sync(priv, 995 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
977 HostCmd_CMD_802_11_KEY_MATERIAL, 996 HostCmd_ACT_GEN_SET,
978 HostCmd_ACT_GEN_SET, 997 !KEY_INFO_ENABLED, encrypt_key, true);
979 !KEY_INFO_ENABLED, encrypt_key);
980 else 998 else
981 ret = mwifiex_send_cmd_sync(priv, 999 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
982 HostCmd_CMD_802_11_KEY_MATERIAL, 1000 HostCmd_ACT_GEN_SET,
983 HostCmd_ACT_GEN_SET, 1001 KEY_INFO_ENABLED, encrypt_key, true);
984 KEY_INFO_ENABLED, encrypt_key);
985 1002
986 return ret; 1003 return ret;
987} 1004}
@@ -1044,19 +1061,27 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
1044 1061
1045 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key)); 1062 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
1046 encrypt_key.key_len = key_len; 1063 encrypt_key.key_len = key_len;
1064 encrypt_key.key_index = key_index;
1047 1065
1048 if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC) 1066 if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
1049 encrypt_key.is_igtk_key = true; 1067 encrypt_key.is_igtk_key = true;
1050 1068
1051 if (!disable) { 1069 if (!disable) {
1052 encrypt_key.key_index = key_index;
1053 if (key_len) 1070 if (key_len)
1054 memcpy(encrypt_key.key_material, key, key_len); 1071 memcpy(encrypt_key.key_material, key, key_len);
1072 else
1073 encrypt_key.is_current_wep_key = true;
1074
1055 if (mac_addr) 1075 if (mac_addr)
1056 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); 1076 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
1057 if (kp && kp->seq && kp->seq_len) 1077 if (kp && kp->seq && kp->seq_len) {
1058 memcpy(encrypt_key.pn, kp->seq, kp->seq_len); 1078 memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
1079 encrypt_key.pn_len = kp->seq_len;
1080 encrypt_key.is_rx_seq_valid = true;
1081 }
1059 } else { 1082 } else {
1083 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
1084 return 0;
1060 encrypt_key.key_disable = true; 1085 encrypt_key.key_disable = true;
1061 if (mac_addr) 1086 if (mac_addr)
1062 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); 1087 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
@@ -1077,8 +1102,8 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
1077 struct mwifiex_ver_ext ver_ext; 1102 struct mwifiex_ver_ext ver_ext;
1078 1103
1079 memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext)); 1104 memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
1080 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT, 1105 if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
1081 HostCmd_ACT_GEN_GET, 0, &ver_ext)) 1106 HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
1082 return -1; 1107 return -1;
1083 1108
1084 return 0; 1109 return 0;
@@ -1103,8 +1128,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1103 ieee80211_frequency_to_channel(chan->center_freq); 1128 ieee80211_frequency_to_channel(chan->center_freq);
1104 roc_cfg.duration = cpu_to_le32(duration); 1129 roc_cfg.duration = cpu_to_le32(duration);
1105 } 1130 }
1106 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN, 1131 if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
1107 action, 0, &roc_cfg)) { 1132 action, 0, &roc_cfg, true)) {
1108 dev_err(priv->adapter->dev, "failed to remain on channel\n"); 1133 dev_err(priv->adapter->dev, "failed to remain on channel\n");
1109 return -1; 1134 return -1;
1110 } 1135 }
@@ -1136,8 +1161,8 @@ mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
1136 break; 1161 break;
1137 } 1162 }
1138 1163
1139 mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE, 1164 mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
1140 HostCmd_ACT_GEN_SET, 0, NULL); 1165 HostCmd_ACT_GEN_SET, 0, NULL, true);
1141 1166
1142 return mwifiex_sta_init_cmd(priv, false); 1167 return mwifiex_sta_init_cmd(priv, false);
1143} 1168}
@@ -1152,8 +1177,8 @@ int
1152mwifiex_get_stats_info(struct mwifiex_private *priv, 1177mwifiex_get_stats_info(struct mwifiex_private *priv,
1153 struct mwifiex_ds_get_stats *log) 1178 struct mwifiex_ds_get_stats *log)
1154{ 1179{
1155 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG, 1180 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_GET_LOG,
1156 HostCmd_ACT_GEN_GET, 0, log); 1181 HostCmd_ACT_GEN_GET, 0, log, true);
1157} 1182}
1158 1183
1159/* 1184/*
@@ -1195,8 +1220,7 @@ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
1195 return -1; 1220 return -1;
1196 } 1221 }
1197 1222
1198 return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw); 1223 return mwifiex_send_cmd(priv, cmd_no, action, 0, reg_rw, true);
1199
1200} 1224}
1201 1225
1202/* 1226/*
@@ -1261,8 +1285,8 @@ mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
1261 rd_eeprom.byte_count = cpu_to_le16((u16) bytes); 1285 rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
1262 1286
1263 /* Send request to firmware */ 1287 /* Send request to firmware */
1264 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS, 1288 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
1265 HostCmd_ACT_GEN_GET, 0, &rd_eeprom); 1289 HostCmd_ACT_GEN_GET, 0, &rd_eeprom, true);
1266 1290
1267 if (!ret) 1291 if (!ret)
1268 memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA); 1292 memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
@@ -1391,7 +1415,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
1391 * with requisite parameters and calls the IOCTL handler. 1415 * with requisite parameters and calls the IOCTL handler.
1392 */ 1416 */
1393int 1417int
1394mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len) 1418mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
1395{ 1419{
1396 struct mwifiex_ds_misc_gen_ie gen_ie; 1420 struct mwifiex_ds_misc_gen_ie gen_ie;
1397 1421
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 4651d676df38..ed26387eccf5 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -88,11 +88,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
88 struct rxpd *local_rx_pd; 88 struct rxpd *local_rx_pd;
89 int hdr_chop; 89 int hdr_chop;
90 struct ethhdr *eth; 90 struct ethhdr *eth;
91 u16 rx_pkt_off, rx_pkt_len;
92 u8 *offset;
91 93
92 local_rx_pd = (struct rxpd *) (skb->data); 94 local_rx_pd = (struct rxpd *) (skb->data);
93 95
94 rx_pkt_hdr = (void *)local_rx_pd + 96 rx_pkt_off = le16_to_cpu(local_rx_pd->rx_pkt_offset);
95 le16_to_cpu(local_rx_pd->rx_pkt_offset); 97 rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
98 rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
96 99
97 if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, 100 if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
98 sizeof(bridge_tunnel_header))) || 101 sizeof(bridge_tunnel_header))) ||
@@ -142,6 +145,12 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
142 return 0; 145 return 0;
143 } 146 }
144 147
148 if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
149 ntohs(rx_pkt_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
150 offset = (u8 *)local_rx_pd + rx_pkt_off;
151 mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
152 }
153
145 priv->rxpd_rate = local_rx_pd->rx_rate; 154 priv->rxpd_rate = local_rx_pd->rx_rate;
146 155
147 priv->rxpd_htinfo = local_rx_pd->ht_info; 156 priv->rxpd_htinfo = local_rx_pd->ht_info;
@@ -192,26 +201,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
192 return ret; 201 return ret;
193 } 202 }
194 203
195 if (rx_pkt_type == PKT_TYPE_AMSDU) { 204 if (rx_pkt_type == PKT_TYPE_MGMT) {
196 struct sk_buff_head list;
197 struct sk_buff *rx_skb;
198
199 __skb_queue_head_init(&list);
200
201 skb_pull(skb, rx_pkt_offset);
202 skb_trim(skb, rx_pkt_length);
203
204 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
205 priv->wdev->iftype, 0, false);
206
207 while (!skb_queue_empty(&list)) {
208 rx_skb = __skb_dequeue(&list);
209 ret = mwifiex_recv_packet(priv, rx_skb);
210 if (ret == -1)
211 dev_err(adapter->dev, "Rx of A-MSDU failed");
212 }
213 return 0;
214 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
215 ret = mwifiex_process_mgmt_packet(priv, skb); 205 ret = mwifiex_process_mgmt_packet(priv, skb);
216 if (ret) 206 if (ret)
217 dev_err(adapter->dev, "Rx of mgmt packet failed"); 207 dev_err(adapter->dev, "Rx of mgmt packet failed");
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 354d64c9606f..1236a5de7bca 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -95,6 +95,9 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
95 } 95 }
96 } 96 }
97 97
98 if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
99 local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
100
98 /* Offset of actual data */ 101 /* Offset of actual data */
99 pkt_offset = sizeof(struct txpd) + pad; 102 pkt_offset = sizeof(struct txpd) + pad;
100 if (pkt_type == PKT_TYPE_MGMT) { 103 if (pkt_type == PKT_TYPE_MGMT) {
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
new file mode 100644
index 000000000000..97662a1ba58c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -0,0 +1,1044 @@
1/* Marvell Wireless LAN device driver: TDLS handling
2 *
3 * Copyright (C) 2014, Marvell International Ltd.
4 *
5 * This software file (the "File") is distributed by Marvell International
6 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
7 * (the "License"). You may use, redistribute and/or modify this File in
8 * accordance with the terms and conditions of the License, a copy of which
9 * is available on the worldwide web at
10 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
11 *
12 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
13 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
14 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
15 * this warranty disclaimer.
16 */
17
18#include "main.h"
19#include "wmm.h"
20#include "11n.h"
21#include "11n_rxreorder.h"
22#include "11ac.h"
23
24#define TDLS_REQ_FIX_LEN 6
25#define TDLS_RESP_FIX_LEN 8
26#define TDLS_CONFIRM_FIX_LEN 6
27
28static void
29mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
30{
31 struct mwifiex_ra_list_tbl *ra_list;
32 struct list_head *tid_list;
33 struct sk_buff *skb, *tmp;
34 struct mwifiex_txinfo *tx_info;
35 unsigned long flags;
36 u32 tid;
37 u8 tid_down;
38
39 dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
40 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
41
42 skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
43 if (!ether_addr_equal(mac, skb->data))
44 continue;
45
46 __skb_unlink(skb, &priv->tdls_txq);
47 tx_info = MWIFIEX_SKB_TXCB(skb);
48 tid = skb->priority;
49 tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
50
51 if (status == TDLS_SETUP_COMPLETE) {
52 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
53 ra_list->tdls_link = true;
54 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
55 } else {
56 tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
57 if (!list_empty(tid_list))
58 ra_list = list_first_entry(tid_list,
59 struct mwifiex_ra_list_tbl, list);
60 else
61 ra_list = NULL;
62 tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
63 }
64
65 if (!ra_list) {
66 mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
67 continue;
68 }
69
70 skb_queue_tail(&ra_list->skb_head, skb);
71
72 ra_list->ba_pkt_count++;
73 ra_list->total_pkt_count++;
74
75 if (atomic_read(&priv->wmm.highest_queued_prio) <
76 tos_to_tid_inv[tid_down])
77 atomic_set(&priv->wmm.highest_queued_prio,
78 tos_to_tid_inv[tid_down]);
79
80 atomic_inc(&priv->wmm.tx_pkts_queued);
81 }
82
83 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
84 return;
85}
86
87static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
88{
89 struct mwifiex_ra_list_tbl *ra_list;
90 struct list_head *ra_list_head;
91 struct sk_buff *skb, *tmp;
92 unsigned long flags;
93 int i;
94
95 dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
96 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
97
98 for (i = 0; i < MAX_NUM_TID; i++) {
99 if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
100 ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
101 list_for_each_entry(ra_list, ra_list_head, list) {
102 skb_queue_walk_safe(&ra_list->skb_head, skb,
103 tmp) {
104 if (!ether_addr_equal(mac, skb->data))
105 continue;
106 __skb_unlink(skb, &ra_list->skb_head);
107 atomic_dec(&priv->wmm.tx_pkts_queued);
108 ra_list->total_pkt_count--;
109 skb_queue_tail(&priv->tdls_txq, skb);
110 }
111 }
112 }
113 }
114
115 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
116 return;
117}
118
119/* This function appends rate TLV to scan config command. */
120static int
121mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
122 struct sk_buff *skb)
123{
124 u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
125 u16 rates_size, supp_rates_size, ext_rates_size;
126
127 memset(rates, 0, sizeof(rates));
128 rates_size = mwifiex_get_supported_rates(priv, rates);
129
130 supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
131
132 if (skb_tailroom(skb) < rates_size + 4) {
133 dev_err(priv->adapter->dev,
134 "Insuffient space while adding rates\n");
135 return -ENOMEM;
136 }
137
138 pos = skb_put(skb, supp_rates_size + 2);
139 *pos++ = WLAN_EID_SUPP_RATES;
140 *pos++ = supp_rates_size;
141 memcpy(pos, rates, supp_rates_size);
142
143 if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
144 ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
145 pos = skb_put(skb, ext_rates_size + 2);
146 *pos++ = WLAN_EID_EXT_SUPP_RATES;
147 *pos++ = ext_rates_size;
148 memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
149 ext_rates_size);
150 }
151
152 return 0;
153}
154
155static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
156 struct sk_buff *skb)
157{
158 struct ieee_types_assoc_rsp *assoc_rsp;
159 u8 *pos;
160
161 assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
162 pos = (void *)skb_put(skb, 4);
163 *pos++ = WLAN_EID_AID;
164 *pos++ = 2;
165 *pos++ = le16_to_cpu(assoc_rsp->a_id);
166
167 return;
168}
169
170static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
171 struct sk_buff *skb)
172{
173 struct ieee80211_vht_cap vht_cap;
174 u8 *pos;
175
176 pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
177 *pos++ = WLAN_EID_VHT_CAPABILITY;
178 *pos++ = sizeof(struct ieee80211_vht_cap);
179
180 memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
181
182 mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
183 memcpy(pos, &vht_cap, sizeof(vht_cap));
184
185 return 0;
186}
187
/* Append a VHT Operation IE for the TDLS link with @mac to @skb.
 *
 * The advertised channel width is chosen as the minimum of our own
 * configured channel-width set and the peer's; when the peer advertises
 * TDLS wide-bandwidth support and the AP is 11ac, the AP's set is also
 * folded in.  The basic MCS set is the per-NSS minimum of our RX MCS
 * map and the peer's RX MCS map.
 *
 * Returns 0 on success (including the early "stay at HT width" exit),
 * -1 if the peer has no station entry.
 */
static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
				     u8 *mac, struct sk_buff *skb)
{
	struct mwifiex_bssdescriptor *bss_desc;
	struct ieee80211_vht_operation *vht_oper;
	struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
	struct mwifiex_sta_node *sta_ptr;
	struct mwifiex_adapter *adapter = priv->adapter;
	u8 supp_chwd_set, peer_supp_chwd_set;
	u8 *pos, ap_supp_chwd_set, chan_bw;
	u16 mcs_map_user, mcs_map_resp, mcs_map_result;
	u16 mcs_user, mcs_resp, nss;
	u32 usr_vht_cap_info;

	bss_desc = &priv->curr_bss_params.bss_descriptor;

	sta_ptr = mwifiex_get_sta_entry(priv, mac);
	if (unlikely(!sta_ptr)) {
		dev_warn(adapter->dev, "TDLS peer station not found in list\n");
		return -1;
	}

	/* NOTE(review): this branch returns early (no IE emitted) when the
	 * BSS is NOT 11ac and the peer DOES advertise wide-bandwidth TDLS;
	 * the condition and the debug message look inverted relative to
	 * each other.  Preserved as-is — verify intent before changing.
	 */
	if (!mwifiex_is_bss_in_11ac_mode(priv)) {
		if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
		    WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
			dev_dbg(adapter->dev,
				"TDLS peer doesn't support wider bandwitdh\n");
			return 0;
		}
	} else {
		ap_vht_cap = bss_desc->bcn_vht_cap;
	}

	/* Reserve room for the IE header + body; vht_oper is then filled
	 * in place.
	 */
	pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
	*pos++ = WLAN_EID_VHT_OPERATION;
	*pos++ = sizeof(struct ieee80211_vht_operation);
	vht_oper = (struct ieee80211_vht_operation *)pos;

	if (bss_desc->bss_band & BAND_A)
		usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
	else
		usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;

	/* Find the minimum channel-width set between us and the TDLS peer */
	vht_cap = &sta_ptr->tdls_cap.vhtcap;
	supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
	peer_supp_chwd_set =
			 GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
	supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);

	/* We need check AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */

	if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
	    WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
		ap_supp_chwd_set =
		      GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
		supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
	}

	/* Map the negotiated channel-width set onto the IE field;
	 * anything unrecognized falls back to HT width.
	 */
	switch (supp_chwd_set) {
	case IEEE80211_VHT_CHANWIDTH_80MHZ:
		vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
		break;
	case IEEE80211_VHT_CHANWIDTH_160MHZ:
		vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
		break;
	case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
		vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
		break;
	default:
		vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
		break;
	}

	/* Basic MCS set: per spatial stream, take the lower of our
	 * user-configured RX MCS and the peer's RX MCS; a stream either
	 * side marks unsupported stays unsupported.
	 */
	mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
	mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
	mcs_map_result = 0;

	for (nss = 1; nss <= 8; nss++) {
		mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
		mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);

		if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
		    (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
			SET_VHTNSSMCS(mcs_map_result, nss,
				      IEEE80211_VHT_MCS_NOT_SUPPORTED);
		else
			SET_VHTNSSMCS(mcs_map_result, nss,
				      min_t(u16, mcs_user, mcs_resp));
	}

	vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);

	/* Channel bandwidth used to locate the center frequency index;
	 * 80+80 deliberately reuses the 80 MHz segment lookup.
	 */
	switch (vht_oper->chan_width) {
	case IEEE80211_VHT_CHANWIDTH_80MHZ:
		chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
		break;
	case IEEE80211_VHT_CHANWIDTH_160MHZ:
		chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
		break;
	case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
		chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
		break;
	default:
		chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
		break;
	}
	vht_oper->center_freq_seg1_idx =
			mwifiex_get_center_freq_index(priv, BAND_AAC,
						      bss_desc->channel,
						      chan_bw);

	return 0;
}
302
303static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
304 struct sk_buff *skb)
305{
306 struct ieee_types_extcap *extcap;
307
308 extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
309 extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
310 extcap->ieee_hdr.len = 8;
311 memset(extcap->ext_capab, 0, 8);
312 extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
313
314 if (priv->adapter->is_hw_11ac_capable)
315 extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
316}
317
318static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
319{
320 u8 *pos = (void *)skb_put(skb, 3);
321
322 *pos++ = WLAN_EID_QOS_CAPA;
323 *pos++ = 1;
324 *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
325}
326
327static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
328 u8 *peer, u8 action_code, u8 dialog_token,
329 u16 status_code, struct sk_buff *skb)
330{
331 struct ieee80211_tdls_data *tf;
332 int ret;
333 u16 capab;
334 struct ieee80211_ht_cap *ht_cap;
335 u8 radio, *pos;
336
337 capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
338
339 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
340 memcpy(tf->da, peer, ETH_ALEN);
341 memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
342 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
343 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
344
345 switch (action_code) {
346 case WLAN_TDLS_SETUP_REQUEST:
347 tf->category = WLAN_CATEGORY_TDLS;
348 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
349 skb_put(skb, sizeof(tf->u.setup_req));
350 tf->u.setup_req.dialog_token = dialog_token;
351 tf->u.setup_req.capability = cpu_to_le16(capab);
352 ret = mwifiex_tdls_append_rates_ie(priv, skb);
353 if (ret) {
354 dev_kfree_skb_any(skb);
355 return ret;
356 }
357
358 pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
359 *pos++ = WLAN_EID_HT_CAPABILITY;
360 *pos++ = sizeof(struct ieee80211_ht_cap);
361 ht_cap = (void *)pos;
362 radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
363 ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
364 if (ret) {
365 dev_kfree_skb_any(skb);
366 return ret;
367 }
368
369 if (priv->adapter->is_hw_11ac_capable) {
370 ret = mwifiex_tdls_add_vht_capab(priv, skb);
371 if (ret) {
372 dev_kfree_skb_any(skb);
373 return ret;
374 }
375 mwifiex_tdls_add_aid(priv, skb);
376 }
377
378 mwifiex_tdls_add_ext_capab(priv, skb);
379 mwifiex_tdls_add_qos_capab(skb);
380 break;
381
382 case WLAN_TDLS_SETUP_RESPONSE:
383 tf->category = WLAN_CATEGORY_TDLS;
384 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
385 skb_put(skb, sizeof(tf->u.setup_resp));
386 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
387 tf->u.setup_resp.dialog_token = dialog_token;
388 tf->u.setup_resp.capability = cpu_to_le16(capab);
389 ret = mwifiex_tdls_append_rates_ie(priv, skb);
390 if (ret) {
391 dev_kfree_skb_any(skb);
392 return ret;
393 }
394
395 pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
396 *pos++ = WLAN_EID_HT_CAPABILITY;
397 *pos++ = sizeof(struct ieee80211_ht_cap);
398 ht_cap = (void *)pos;
399 radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
400 ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
401 if (ret) {
402 dev_kfree_skb_any(skb);
403 return ret;
404 }
405
406 if (priv->adapter->is_hw_11ac_capable) {
407 ret = mwifiex_tdls_add_vht_capab(priv, skb);
408 if (ret) {
409 dev_kfree_skb_any(skb);
410 return ret;
411 }
412 mwifiex_tdls_add_aid(priv, skb);
413 }
414
415 mwifiex_tdls_add_ext_capab(priv, skb);
416 mwifiex_tdls_add_qos_capab(skb);
417 break;
418
419 case WLAN_TDLS_SETUP_CONFIRM:
420 tf->category = WLAN_CATEGORY_TDLS;
421 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
422 skb_put(skb, sizeof(tf->u.setup_cfm));
423 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
424 tf->u.setup_cfm.dialog_token = dialog_token;
425 if (priv->adapter->is_hw_11ac_capable) {
426 ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
427 if (ret) {
428 dev_kfree_skb_any(skb);
429 return ret;
430 }
431 }
432 break;
433
434 case WLAN_TDLS_TEARDOWN:
435 tf->category = WLAN_CATEGORY_TDLS;
436 tf->action_code = WLAN_TDLS_TEARDOWN;
437 skb_put(skb, sizeof(tf->u.teardown));
438 tf->u.teardown.reason_code = cpu_to_le16(status_code);
439 break;
440
441 case WLAN_TDLS_DISCOVERY_REQUEST:
442 tf->category = WLAN_CATEGORY_TDLS;
443 tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
444 skb_put(skb, sizeof(tf->u.discover_req));
445 tf->u.discover_req.dialog_token = dialog_token;
446 break;
447 default:
448 dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
449 return -EINVAL;
450 }
451
452 return 0;
453}
454
455static void
456mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
457{
458 struct ieee80211_tdls_lnkie *lnkid;
459
460 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
461 lnkid->ie_type = WLAN_EID_LINK_ID;
462 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
463 sizeof(struct ieee_types_header);
464
465 memcpy(lnkid->bssid, bssid, ETH_ALEN);
466 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
467 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
468}
469
470int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
471 u8 *peer, u8 action_code, u8 dialog_token,
472 u16 status_code, const u8 *extra_ies,
473 size_t extra_ies_len)
474{
475 struct sk_buff *skb;
476 struct mwifiex_txinfo *tx_info;
477 struct timeval tv;
478 int ret;
479 u16 skb_len;
480
481 skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
482 max(sizeof(struct ieee80211_mgmt),
483 sizeof(struct ieee80211_tdls_data)) +
484 MWIFIEX_MGMT_FRAME_HEADER_SIZE +
485 MWIFIEX_SUPPORTED_RATES +
486 3 + /* Qos Info */
487 sizeof(struct ieee_types_extcap) +
488 sizeof(struct ieee80211_ht_cap) +
489 sizeof(struct ieee_types_bss_co_2040) +
490 sizeof(struct ieee80211_ht_operation) +
491 sizeof(struct ieee80211_tdls_lnkie) +
492 extra_ies_len;
493
494 if (priv->adapter->is_hw_11ac_capable)
495 skb_len += sizeof(struct ieee_types_vht_cap) +
496 sizeof(struct ieee_types_vht_oper) +
497 sizeof(struct ieee_types_aid);
498
499 skb = dev_alloc_skb(skb_len);
500 if (!skb) {
501 dev_err(priv->adapter->dev,
502 "allocate skb failed for management frame\n");
503 return -ENOMEM;
504 }
505 skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
506
507 switch (action_code) {
508 case WLAN_TDLS_SETUP_REQUEST:
509 case WLAN_TDLS_SETUP_CONFIRM:
510 case WLAN_TDLS_TEARDOWN:
511 case WLAN_TDLS_DISCOVERY_REQUEST:
512 ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
513 dialog_token, status_code,
514 skb);
515 if (ret) {
516 dev_kfree_skb_any(skb);
517 return ret;
518 }
519 if (extra_ies_len)
520 memcpy(skb_put(skb, extra_ies_len), extra_ies,
521 extra_ies_len);
522 mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
523 priv->cfg_bssid);
524 break;
525 case WLAN_TDLS_SETUP_RESPONSE:
526 ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
527 dialog_token, status_code,
528 skb);
529 if (ret) {
530 dev_kfree_skb_any(skb);
531 return ret;
532 }
533 if (extra_ies_len)
534 memcpy(skb_put(skb, extra_ies_len), extra_ies,
535 extra_ies_len);
536 mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
537 priv->cfg_bssid);
538 break;
539 }
540
541 switch (action_code) {
542 case WLAN_TDLS_SETUP_REQUEST:
543 case WLAN_TDLS_SETUP_RESPONSE:
544 skb->priority = MWIFIEX_PRIO_BK;
545 break;
546 default:
547 skb->priority = MWIFIEX_PRIO_VI;
548 break;
549 }
550
551 tx_info = MWIFIEX_SKB_TXCB(skb);
552 tx_info->bss_num = priv->bss_num;
553 tx_info->bss_type = priv->bss_type;
554
555 do_gettimeofday(&tv);
556 skb->tstamp = timeval_to_ktime(tv);
557 mwifiex_queue_tx_pkt(priv, skb);
558
559 return 0;
560}
561
/* Build an 802.11 public action frame body for TDLS in @skb.
 *
 * Only WLAN_PUB_ACTION_TDLS_DISCOVER_RES is supported; any other action
 * code frees nothing and returns -EINVAL (the caller owns @skb).  On
 * internal IE-building failure @skb is freed here and the error is
 * returned.
 *
 * The frame carries a fourth address (addr4): six bytes are reserved
 * right after the fixed 24-byte header, the action body is first
 * written at its natural offset and then memmove'd up past addr4, and
 * addr4 is filled with the broadcast address.
 */
static int
mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
				    u8 action_code, u8 dialog_token,
				    u16 status_code, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	int ret;
	u16 capab;
	struct ieee80211_ht_cap *ht_cap;
	u8 radio, *pos;

	capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;

	mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));

	/* 24 = fixed management header (FC, duration, DA/SA/BSSID, seq) */
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, peer, ETH_ALEN);
	memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
	memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	/* add address 4 */
	pos = skb_put(skb, ETH_ALEN);

	switch (action_code) {
	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
		/* +1 for the category octet preceding the union body */
		skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
		mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
		mgmt->u.action.u.tdls_discover_resp.action_code =
					      WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
		mgmt->u.action.u.tdls_discover_resp.dialog_token =
								   dialog_token;
		mgmt->u.action.u.tdls_discover_resp.capability =
							     cpu_to_le16(capab);
		/* move back for addr4 */
		memmove(pos + ETH_ALEN, &mgmt->u.action.category,
			sizeof(mgmt->u.action.u.tdls_discover_resp));
		/* init address 4 */
		memcpy(pos, bc_addr, ETH_ALEN);

		ret = mwifiex_tdls_append_rates_ie(priv, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}

		/* HT Capability IE, body filled in place after the header */
		pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
		*pos++ = WLAN_EID_HT_CAPABILITY;
		*pos++ = sizeof(struct ieee80211_ht_cap);
		ht_cap = (void *)pos;
		radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
		ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}

		/* VHT capability + AID only on 11ac-capable hardware */
		if (priv->adapter->is_hw_11ac_capable) {
			ret = mwifiex_tdls_add_vht_capab(priv, skb);
			if (ret) {
				dev_kfree_skb_any(skb);
				return ret;
			}
			mwifiex_tdls_add_aid(priv, skb);
		}

		mwifiex_tdls_add_ext_capab(priv, skb);
		mwifiex_tdls_add_qos_capab(skb);
		break;
	default:
		dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
		return -EINVAL;
	}

	return 0;
}
640
/* Build and queue a TDLS action (management) frame for @action_code.
 *
 * The frame travels through the data path as a PKT_TYPE_MGMT packet:
 * a MWIFIEX_MGMT_FRAME_HEADER_SIZE firmware header (pkt_type +
 * tx_control) plus a 2-byte packet length precede the 802.11 frame
 * body; the length is back-patched once the body is complete.
 *
 * Returns 0 on success or a negative error code; the skb is always
 * consumed.
 */
int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
				 u8 *peer, u8 action_code, u8 dialog_token,
				 u16 status_code, const u8 *extra_ies,
				 size_t extra_ies_len)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;
	struct timeval tv;
	u8 *pos;
	u32 pkt_type, tx_control;
	u16 pkt_len, skb_len;

	/* Worst-case length of everything this function may append */
	skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
		  max(sizeof(struct ieee80211_mgmt),
		      sizeof(struct ieee80211_tdls_data)) +
		  MWIFIEX_MGMT_FRAME_HEADER_SIZE +
		  MWIFIEX_SUPPORTED_RATES +
		  sizeof(struct ieee_types_extcap) +
		  sizeof(struct ieee80211_ht_cap) +
		  sizeof(struct ieee_types_bss_co_2040) +
		  sizeof(struct ieee80211_ht_operation) +
		  sizeof(struct ieee80211_tdls_lnkie) +
		  extra_ies_len +
		  3 + /* Qos Info */
		  ETH_ALEN; /* Address4 */

	if (priv->adapter->is_hw_11ac_capable)
		skb_len += sizeof(struct ieee_types_vht_cap) +
			   sizeof(struct ieee_types_vht_oper) +
			   sizeof(struct ieee_types_aid);

	skb = dev_alloc_skb(skb_len);
	if (!skb) {
		dev_err(priv->adapter->dev,
			"allocate skb failed for management frame\n");
		return -ENOMEM;
	}

	skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);

	/* Firmware header: pkt_type + tx_control, then a pkt_len slot
	 * that is filled in at the end.
	 * NOTE(review): pkt_type/tx_control/pkt_len are copied in host
	 * byte order; presumably the firmware interface is little-endian
	 * — verify on big-endian hosts.
	 */
	pkt_type = PKT_TYPE_MGMT;
	tx_control = 0;
	pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
	memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
	memcpy(pos, &pkt_type, sizeof(pkt_type));
	memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));

	if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
						dialog_token, status_code,
						skb)) {
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (extra_ies_len)
		memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);

	/* the TDLS link IE is always added last we are the responder */

	mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
				 priv->cfg_bssid);

	skb->priority = MWIFIEX_PRIO_VI;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;

	/* Back-patch the length of everything after the firmware header */
	pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
	memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
	       sizeof(pkt_len));
	do_gettimeofday(&tv);
	skb->tstamp = timeval_to_ktime(tv);
	mwifiex_queue_tx_pkt(priv, skb);

	return 0;
}
719
720/* This function process tdls action frame from peer.
721 * Peer capabilities are stored into station node structure.
722 */
723void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
724 u8 *buf, int len)
725{
726 struct mwifiex_sta_node *sta_ptr;
727 u8 *peer, *pos, *end;
728 u8 i, action, basic;
729 int ie_len = 0;
730
731 if (len < (sizeof(struct ethhdr) + 3))
732 return;
733 if (*(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
734 return;
735 if (*(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
736 return;
737
738 peer = buf + ETH_ALEN;
739 action = *(buf + sizeof(struct ethhdr) + 2);
740
741 /* just handle TDLS setup request/response/confirm */
742 if (action > WLAN_TDLS_SETUP_CONFIRM)
743 return;
744
745 dev_dbg(priv->adapter->dev,
746 "rx:tdls action: peer=%pM, action=%d\n", peer, action);
747
748 sta_ptr = mwifiex_add_sta_entry(priv, peer);
749 if (!sta_ptr)
750 return;
751
752 switch (action) {
753 case WLAN_TDLS_SETUP_REQUEST:
754 if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
755 return;
756
757 pos = buf + sizeof(struct ethhdr) + 4;
758 /* payload 1+ category 1 + action 1 + dialog 1 */
759 sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
760 ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
761 pos += 2;
762 break;
763
764 case WLAN_TDLS_SETUP_RESPONSE:
765 if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
766 return;
767 /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
768 pos = buf + sizeof(struct ethhdr) + 6;
769 sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
770 ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
771 pos += 2;
772 break;
773
774 case WLAN_TDLS_SETUP_CONFIRM:
775 if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
776 return;
777 pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
778 ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
779 break;
780 default:
781 dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
782 return;
783 }
784
785 for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
786 if (pos + 2 + pos[1] > end)
787 break;
788
789 switch (*pos) {
790 case WLAN_EID_SUPP_RATES:
791 sta_ptr->tdls_cap.rates_len = pos[1];
792 for (i = 0; i < pos[1]; i++)
793 sta_ptr->tdls_cap.rates[i] = pos[i + 2];
794 break;
795
796 case WLAN_EID_EXT_SUPP_RATES:
797 basic = sta_ptr->tdls_cap.rates_len;
798 for (i = 0; i < pos[1]; i++)
799 sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
800 sta_ptr->tdls_cap.rates_len += pos[1];
801 break;
802 case WLAN_EID_HT_CAPABILITY:
803 memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
804 sizeof(struct ieee80211_ht_cap));
805 sta_ptr->is_11n_enabled = 1;
806 break;
807 case WLAN_EID_HT_OPERATION:
808 memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
809 sizeof(struct ieee80211_ht_operation));
810 break;
811 case WLAN_EID_BSS_COEX_2040:
812 sta_ptr->tdls_cap.coex_2040 = pos[2];
813 break;
814 case WLAN_EID_EXT_CAPABILITY:
815 memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
816 sizeof(struct ieee_types_header) +
817 min_t(u8, pos[1], 8));
818 break;
819 case WLAN_EID_RSN:
820 memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
821 sizeof(struct ieee_types_header) + pos[1]);
822 break;
823 case WLAN_EID_QOS_CAPA:
824 sta_ptr->tdls_cap.qos_info = pos[2];
825 break;
826 case WLAN_EID_VHT_OPERATION:
827 if (priv->adapter->is_hw_11ac_capable)
828 memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
829 sizeof(struct ieee80211_vht_operation));
830 break;
831 case WLAN_EID_VHT_CAPABILITY:
832 if (priv->adapter->is_hw_11ac_capable) {
833 memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
834 sizeof(struct ieee80211_vht_cap));
835 sta_ptr->is_11ac_enabled = 1;
836 }
837 break;
838 case WLAN_EID_AID:
839 if (priv->adapter->is_hw_11ac_capable)
840 sta_ptr->tdls_cap.aid =
841 le16_to_cpu(*(__le16 *)(pos + 2));
842 default:
843 break;
844 }
845 }
846
847 return;
848}
849
850static int
851mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
852{
853 struct mwifiex_sta_node *sta_ptr;
854 struct mwifiex_ds_tdls_oper tdls_oper;
855
856 memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
857 sta_ptr = mwifiex_get_sta_entry(priv, peer);
858
859 if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
860 dev_err(priv->adapter->dev,
861 "link absent for peer %pM; cannot config\n", peer);
862 return -EINVAL;
863 }
864
865 memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
866 tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
867 return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
868 HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
869}
870
871static int
872mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
873{
874 struct mwifiex_sta_node *sta_ptr;
875 struct mwifiex_ds_tdls_oper tdls_oper;
876
877 memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
878 sta_ptr = mwifiex_get_sta_entry(priv, peer);
879
880 if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
881 dev_dbg(priv->adapter->dev,
882 "Setup already in progress for peer %pM\n", peer);
883 return 0;
884 }
885
886 sta_ptr = mwifiex_add_sta_entry(priv, peer);
887 if (!sta_ptr)
888 return -ENOMEM;
889
890 sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
891 mwifiex_hold_tdls_packets(priv, peer);
892 memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
893 tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
894 return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
895 HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
896}
897
898static int
899mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
900{
901 struct mwifiex_sta_node *sta_ptr;
902 struct mwifiex_ds_tdls_oper tdls_oper;
903 unsigned long flags;
904
905 memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
906 sta_ptr = mwifiex_get_sta_entry(priv, peer);
907
908 if (sta_ptr) {
909 if (sta_ptr->is_11n_enabled) {
910 mwifiex_11n_cleanup_reorder_tbl(priv);
911 spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
912 flags);
913 mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
914 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
915 flags);
916 }
917 mwifiex_del_sta_entry(priv, peer);
918 }
919
920 mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
921 memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
922 tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
923 return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
924 HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
925}
926
/* Finish TDLS link setup with @peer after firmware reports success.
 *
 * On success: mark the link complete, derive 11n parameters (max AMSDU
 * size, per-TID AMPDU permissions) from the peer's HT capability, reset
 * the RX sequence tracking and release packets held for the peer.
 * On failure (no entry or TDLS_SETUP_FAILURE): tear down aggregation
 * state, drop the station entry, reroute held packets via the AP and
 * return -1.
 */
static int
mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
{
	struct mwifiex_sta_node *sta_ptr;
	struct ieee80211_mcs_info mcs;
	unsigned long flags;
	int i;

	sta_ptr = mwifiex_get_sta_entry(priv, peer);

	if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
		dev_dbg(priv->adapter->dev,
			"tdls: enable link %pM success\n", peer);

		sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;

		/* NOTE(review): 11n is enabled when rx_mask[0] != 0xff,
		 * i.e. when the peer does NOT advertise all of MCS 0-7 —
		 * this looks inverted; preserved as-is, verify intent.
		 */
		mcs = sta_ptr->tdls_cap.ht_capb.mcs;
		if (mcs.rx_mask[0] != 0xff)
			sta_ptr->is_11n_enabled = true;
		if (sta_ptr->is_11n_enabled) {
			/* AMSDU size limit from the peer's HT capability */
			if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
			    IEEE80211_HT_CAP_MAX_AMSDU)
				sta_ptr->max_amsdu =
					MWIFIEX_TX_DATA_BUF_SIZE_8K;
			else
				sta_ptr->max_amsdu =
					MWIFIEX_TX_DATA_BUF_SIZE_4K;

			/* Inherit the user's per-TID AMPDU policy */
			for (i = 0; i < MAX_NUM_TID; i++)
				sta_ptr->ampdu_sta[i] =
					priv->aggr_prio_tbl[i].ampdu_user;
		} else {
			for (i = 0; i < MAX_NUM_TID; i++)
				sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
		}

		/* 0xff marks "no sequence number seen yet" per TID */
		memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
		mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
	} else {
		dev_dbg(priv->adapter->dev,
			"tdls: enable link %pM failed\n", peer);
		if (sta_ptr) {
			mwifiex_11n_cleanup_reorder_tbl(priv);
			spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
					  flags);
			mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       flags);
			mwifiex_del_sta_entry(priv, peer);
		}
		mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);

		return -1;
	}

	return 0;
}
984
985int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
986{
987 switch (action) {
988 case MWIFIEX_TDLS_ENABLE_LINK:
989 return mwifiex_tdls_process_enable_link(priv, peer);
990 case MWIFIEX_TDLS_DISABLE_LINK:
991 return mwifiex_tdls_process_disable_link(priv, peer);
992 case MWIFIEX_TDLS_CREATE_LINK:
993 return mwifiex_tdls_process_create_link(priv, peer);
994 case MWIFIEX_TDLS_CONFIG_LINK:
995 return mwifiex_tdls_process_config_link(priv, peer);
996 }
997 return 0;
998}
999
1000int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
1001{
1002 struct mwifiex_sta_node *sta_ptr;
1003
1004 sta_ptr = mwifiex_get_sta_entry(priv, mac);
1005 if (sta_ptr)
1006 return sta_ptr->tdls_status;
1007
1008 return TDLS_NOT_SETUP;
1009}
1010
1011void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
1012{
1013 struct mwifiex_sta_node *sta_ptr;
1014 struct mwifiex_ds_tdls_oper tdls_oper;
1015 unsigned long flags;
1016
1017 if (list_empty(&priv->sta_list))
1018 return;
1019
1020 list_for_each_entry(sta_ptr, &priv->sta_list, list) {
1021 memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
1022
1023 if (sta_ptr->is_11n_enabled) {
1024 mwifiex_11n_cleanup_reorder_tbl(priv);
1025 spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
1026 flags);
1027 mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
1028 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1029 flags);
1030 }
1031
1032 mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
1033 TDLS_LINK_TEARDOWN);
1034 memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
1035 tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
1036 if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
1037 HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
1038 dev_warn(priv->adapter->dev,
1039 "Disable link failed for TDLS peer %pM",
1040 sta_ptr->mac_addr);
1041 }
1042
1043 mwifiex_del_all_sta_list(priv);
1044}
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 64424c81b44f..9be6544bdded 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -159,6 +159,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
159 struct cfg80211_ap_settings *params) 159 struct cfg80211_ap_settings *params)
160{ 160{
161 const u8 *ht_ie; 161 const u8 *ht_ie;
162 u16 cap_info;
162 163
163 if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info)) 164 if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
164 return; 165 return;
@@ -168,6 +169,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
168 if (ht_ie) { 169 if (ht_ie) {
169 memcpy(&bss_cfg->ht_cap, ht_ie + 2, 170 memcpy(&bss_cfg->ht_cap, ht_ie + 2,
170 sizeof(struct ieee80211_ht_cap)); 171 sizeof(struct ieee80211_ht_cap));
172 cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
173 memset(&bss_cfg->ht_cap.mcs, 0,
174 priv->adapter->number_of_antenna);
175 switch (GET_RXSTBC(cap_info)) {
176 case MWIFIEX_RX_STBC1:
177 /* HT_CAP 1X1 mode */
178 memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
179 break;
180 case MWIFIEX_RX_STBC12: /* fall through */
181 case MWIFIEX_RX_STBC123:
182 /* HT_CAP 2X2 mode */
183 memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
184 break;
185 default:
186 dev_warn(priv->adapter->dev,
187 "Unsupported RX-STBC, default to 2x2\n");
188 memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
189 break;
190 }
171 priv->ap_11n_enabled = 1; 191 priv->ap_11n_enabled = 1;
172 } else { 192 } else {
173 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap)); 193 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
@@ -226,8 +246,8 @@ void mwifiex_set_vht_width(struct mwifiex_private *priv,
226 if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80) 246 if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
227 vht_cfg.misc_config |= VHT_BW_80_160_80P80; 247 vht_cfg.misc_config |= VHT_BW_80_160_80P80;
228 248
229 mwifiex_send_cmd_sync(priv, HostCmd_CMD_11AC_CFG, 249 mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
230 HostCmd_ACT_GEN_SET, 0, &vht_cfg); 250 HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
231 251
232 return; 252 return;
233} 253}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 718066577c6c..92e77a398ecf 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -21,126 +21,8 @@
21#include "main.h" 21#include "main.h"
22#include "11n.h" 22#include "11n.h"
23 23
24/*
25 * This function will return the pointer to station entry in station list
26 * table which matches specified mac address.
27 * This function should be called after acquiring RA list spinlock.
28 * NULL is returned if station entry is not found in associated STA list.
29 */
30struct mwifiex_sta_node *
31mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
32{
33 struct mwifiex_sta_node *node;
34
35 if (!mac)
36 return NULL;
37
38 list_for_each_entry(node, &priv->sta_list, list) {
39 if (!memcmp(node->mac_addr, mac, ETH_ALEN))
40 return node;
41 }
42
43 return NULL;
44}
45
46/*
47 * This function will add a sta_node entry to associated station list
48 * table with the given mac address.
49 * If entry exist already, existing entry is returned.
50 * If received mac address is NULL, NULL is returned.
51 */
52static struct mwifiex_sta_node *
53mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
54{
55 struct mwifiex_sta_node *node;
56 unsigned long flags;
57
58 if (!mac)
59 return NULL;
60
61 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
62 node = mwifiex_get_sta_entry(priv, mac);
63 if (node)
64 goto done;
65
66 node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
67 if (!node)
68 goto done;
69
70 memcpy(node->mac_addr, mac, ETH_ALEN);
71 list_add_tail(&node->list, &priv->sta_list);
72
73done:
74 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
75 return node;
76}
77
78/*
79 * This function will search for HT IE in association request IEs
80 * and set station HT parameters accordingly.
81 */
82static void
83mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
84 int ies_len, struct mwifiex_sta_node *node)
85{
86 const struct ieee80211_ht_cap *ht_cap;
87
88 if (!ies)
89 return;
90 24
91 ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
92 if (ht_cap) {
93 node->is_11n_enabled = 1;
94 node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
95 IEEE80211_HT_CAP_MAX_AMSDU ?
96 MWIFIEX_TX_DATA_BUF_SIZE_8K :
97 MWIFIEX_TX_DATA_BUF_SIZE_4K;
98 } else {
99 node->is_11n_enabled = 0;
100 }
101 25
102 return;
103}
104
105/*
106 * This function will delete a station entry from station list
107 */
108static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
109{
110 struct mwifiex_sta_node *node;
111 unsigned long flags;
112
113 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
114
115 node = mwifiex_get_sta_entry(priv, mac);
116 if (node) {
117 list_del(&node->list);
118 kfree(node);
119 }
120
121 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
122 return;
123}
124
125/*
126 * This function will delete all stations from associated station list.
127 */
128static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
129{
130 struct mwifiex_sta_node *node, *tmp;
131 unsigned long flags;
132
133 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
134
135 list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
136 list_del(&node->list);
137 kfree(node);
138 }
139
140 INIT_LIST_HEAD(&priv->sta_list);
141 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
142 return;
143}
144 26
145/* 27/*
146 * This function handles AP interface specific events generated by firmware. 28 * This function handles AP interface specific events generated by firmware.
@@ -268,9 +150,9 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
268 case EVENT_ADDBA: 150 case EVENT_ADDBA:
269 dev_dbg(adapter->dev, "event: ADDBA Request\n"); 151 dev_dbg(adapter->dev, "event: ADDBA Request\n");
270 if (priv->media_connected) 152 if (priv->media_connected)
271 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP, 153 mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
272 HostCmd_ACT_GEN_SET, 0, 154 HostCmd_ACT_GEN_SET, 0,
273 adapter->event_body); 155 adapter->event_body, false);
274 break; 156 break;
275 case EVENT_DELBA: 157 case EVENT_DELBA:
276 dev_dbg(adapter->dev, "event: DELBA Request\n"); 158 dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -284,6 +166,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
284 mwifiex_11n_ba_stream_timeout(priv, ba_timeout); 166 mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
285 } 167 }
286 break; 168 break;
169 case EVENT_EXT_SCAN_REPORT:
170 dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
171 if (adapter->ext_scan)
172 return mwifiex_handle_event_ext_scan_report(priv,
173 adapter->event_skb->data);
174 break;
287 default: 175 default:
288 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 176 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
289 eventcause); 177 eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 3c74eb254927..9a56bc61cb1d 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -284,27 +284,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
284 return 0; 284 return 0;
285 } 285 }
286 286
287 if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) { 287 if (rx_pkt_type == PKT_TYPE_MGMT) {
288 struct sk_buff_head list;
289 struct sk_buff *rx_skb;
290
291 __skb_queue_head_init(&list);
292 skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
293 skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
294
295 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
296 priv->wdev->iftype, 0, false);
297
298 while (!skb_queue_empty(&list)) {
299 rx_skb = __skb_dequeue(&list);
300 ret = mwifiex_recv_packet(priv, rx_skb);
301 if (ret)
302 dev_err(adapter->dev,
303 "AP:Rx A-MSDU failed");
304 }
305
306 return 0;
307 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
308 ret = mwifiex_process_mgmt_packet(priv, skb); 288 ret = mwifiex_process_mgmt_packet(priv, skb);
309 if (ret) 289 if (ret)
310 dev_err(adapter->dev, "Rx of mgmt packet failed"); 290 dev_err(adapter->dev, "Rx of mgmt packet failed");
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 208748804a55..edbe4aff00d8 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -459,6 +459,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
459 * 'suspended' state and a 'disconnect' one. 459 * 'suspended' state and a 'disconnect' one.
460 */ 460 */
461 adapter->is_suspended = true; 461 adapter->is_suspended = true;
462 adapter->hs_enabling = false;
462 463
463 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 464 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
464 usb_kill_urb(card->rx_cmd.urb); 465 usb_kill_urb(card->rx_cmd.urb);
@@ -766,11 +767,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
766 switch (le16_to_cpu(card->udev->descriptor.idProduct)) { 767 switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
767 case USB8897_PID_1: 768 case USB8897_PID_1:
768 case USB8897_PID_2: 769 case USB8897_PID_2:
770 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
769 strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME); 771 strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
770 break; 772 break;
771 case USB8797_PID_1: 773 case USB8797_PID_1:
772 case USB8797_PID_2: 774 case USB8797_PID_2:
773 default: 775 default:
776 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
774 strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME); 777 strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
775 break; 778 break;
776 } 779 }
@@ -1024,7 +1027,6 @@ static void mwifiex_usb_cleanup_module(void)
1024 1027
1025 if (usb_card && usb_card->adapter) { 1028 if (usb_card && usb_card->adapter) {
1026 struct mwifiex_adapter *adapter = usb_card->adapter; 1029 struct mwifiex_adapter *adapter = usb_card->adapter;
1027 int i;
1028 1030
1029 /* In case driver is removed when asynchronous FW downloading is 1031 /* In case driver is removed when asynchronous FW downloading is
1030 * in progress 1032 * in progress
@@ -1035,11 +1037,8 @@ static void mwifiex_usb_cleanup_module(void)
1035 if (adapter->is_suspended) 1037 if (adapter->is_suspended)
1036 mwifiex_usb_resume(usb_card->intf); 1038 mwifiex_usb_resume(usb_card->intf);
1037#endif 1039#endif
1038 for (i = 0; i < adapter->priv_num; i++) 1040
1039 if ((GET_BSS_ROLE(adapter->priv[i]) == 1041 mwifiex_deauthenticate_all(adapter);
1040 MWIFIEX_BSS_ROLE_STA) &&
1041 adapter->priv[i]->media_connected)
1042 mwifiex_deauthenticate(adapter->priv[i], NULL);
1043 1042
1044 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, 1043 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
1045 MWIFIEX_BSS_ROLE_ANY), 1044 MWIFIEX_BSS_ROLE_ANY),
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 9b82e225880c..c3824e37f3f2 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -72,7 +72,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
72 return -1; 72 return -1;
73 } 73 }
74 74
75 return mwifiex_send_cmd_sync(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL); 75 return mwifiex_send_cmd(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL, true);
76} 76}
77EXPORT_SYMBOL_GPL(mwifiex_init_shutdown_fw); 77EXPORT_SYMBOL_GPL(mwifiex_init_shutdown_fw);
78 78
@@ -104,6 +104,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
104 info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try; 104 info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
105 info->is_hs_configured = adapter->is_hs_configured; 105 info->is_hs_configured = adapter->is_hs_configured;
106 info->hs_activated = adapter->hs_activated; 106 info->hs_activated = adapter->hs_activated;
107 info->is_cmd_timedout = adapter->is_cmd_timedout;
107 info->num_cmd_host_to_card_failure 108 info->num_cmd_host_to_card_failure
108 = adapter->dbg.num_cmd_host_to_card_failure; 109 = adapter->dbg.num_cmd_host_to_card_failure;
109 info->num_cmd_sleep_cfm_host_to_card_failure 110 info->num_cmd_sleep_cfm_host_to_card_failure
@@ -119,7 +120,6 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
119 info->num_cmd_assoc_failure = 120 info->num_cmd_assoc_failure =
120 adapter->dbg.num_cmd_assoc_failure; 121 adapter->dbg.num_cmd_assoc_failure;
121 info->num_tx_timeout = adapter->dbg.num_tx_timeout; 122 info->num_tx_timeout = adapter->dbg.num_tx_timeout;
122 info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
123 info->timeout_cmd_id = adapter->dbg.timeout_cmd_id; 123 info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
124 info->timeout_cmd_act = adapter->dbg.timeout_cmd_act; 124 info->timeout_cmd_act = adapter->dbg.timeout_cmd_act;
125 memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id, 125 memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id,
@@ -252,3 +252,117 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
252 252
253 return 0; 253 return 0;
254} 254}
255
256/* This function will return the pointer to station entry in station list
257 * table which matches specified mac address.
258 * This function should be called after acquiring RA list spinlock.
259 * NULL is returned if station entry is not found in associated STA list.
260 */
261struct mwifiex_sta_node *
262mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
263{
264 struct mwifiex_sta_node *node;
265
266 if (!mac)
267 return NULL;
268
269 list_for_each_entry(node, &priv->sta_list, list) {
270 if (!memcmp(node->mac_addr, mac, ETH_ALEN))
271 return node;
272 }
273
274 return NULL;
275}
276
277/* This function will add a sta_node entry to associated station list
278 * table with the given mac address.
279 * If entry exist already, existing entry is returned.
280 * If received mac address is NULL, NULL is returned.
281 */
282struct mwifiex_sta_node *
283mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
284{
285 struct mwifiex_sta_node *node;
286 unsigned long flags;
287
288 if (!mac)
289 return NULL;
290
291 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
292 node = mwifiex_get_sta_entry(priv, mac);
293 if (node)
294 goto done;
295
296 node = kzalloc(sizeof(*node), GFP_ATOMIC);
297 if (!node)
298 goto done;
299
300 memcpy(node->mac_addr, mac, ETH_ALEN);
301 list_add_tail(&node->list, &priv->sta_list);
302
303done:
304 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
305 return node;
306}
307
308/* This function will search for HT IE in association request IEs
309 * and set station HT parameters accordingly.
310 */
311void
312mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
313 int ies_len, struct mwifiex_sta_node *node)
314{
315 const struct ieee80211_ht_cap *ht_cap;
316
317 if (!ies)
318 return;
319
320 ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
321 if (ht_cap) {
322 node->is_11n_enabled = 1;
323 node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
324 IEEE80211_HT_CAP_MAX_AMSDU ?
325 MWIFIEX_TX_DATA_BUF_SIZE_8K :
326 MWIFIEX_TX_DATA_BUF_SIZE_4K;
327 } else {
328 node->is_11n_enabled = 0;
329 }
330
331 return;
332}
333
334/* This function will delete a station entry from station list */
335void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
336{
337 struct mwifiex_sta_node *node;
338 unsigned long flags;
339
340 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
341
342 node = mwifiex_get_sta_entry(priv, mac);
343 if (node) {
344 list_del(&node->list);
345 kfree(node);
346 }
347
348 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
349 return;
350}
351
352/* This function will delete all stations from associated station list. */
353void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
354{
355 struct mwifiex_sta_node *node, *tmp;
356 unsigned long flags;
357
358 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
359
360 list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
361 list_del(&node->list);
362 kfree(node);
363 }
364
365 INIT_LIST_HEAD(&priv->sta_list);
366 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
367 return;
368}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index cb2d0582bd36..ddae57021397 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -30,8 +30,24 @@ static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
30 return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t)); 30 return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
31} 31}
32 32
33static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa) 33struct mwifiex_dma_mapping {
34 dma_addr_t addr;
35 size_t len;
36};
37
38static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
39 struct mwifiex_dma_mapping *mapping)
34{ 40{
35 memcpy(buf_pa, skb->cb, sizeof(dma_addr_t)); 41 memcpy(mapping, skb->cb, sizeof(*mapping));
36} 42}
43
44static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
45{
46 struct mwifiex_dma_mapping mapping;
47
48 MWIFIEX_SKB_PACB(skb, &mapping);
49
50 return mapping.addr;
51}
52
37#endif /* !_MWIFIEX_UTIL_H_ */ 53#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 981cf6e7c73b..0a7cc742aed7 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -37,8 +37,8 @@
37/* Offset for TOS field in the IP header */ 37/* Offset for TOS field in the IP header */
38#define IPTOS_OFFSET 5 38#define IPTOS_OFFSET 5
39 39
40static bool enable_tx_amsdu; 40static bool disable_tx_amsdu;
41module_param(enable_tx_amsdu, bool, 0644); 41module_param(disable_tx_amsdu, bool, 0644);
42 42
43/* WMM information IE */ 43/* WMM information IE */
44static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07, 44static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
@@ -64,21 +64,6 @@ static u8 tos_to_tid[] = {
64 0x07 /* 1 1 1 AC_VO */ 64 0x07 /* 1 1 1 AC_VO */
65}; 65};
66 66
67/*
68 * This table inverses the tos_to_tid operation to get a priority
69 * which is in sequential order, and can be compared.
70 * Use this to compare the priority of two different TIDs.
71 */
72static u8 tos_to_tid_inv[] = {
73 0x02, /* from tos_to_tid[2] = 0 */
74 0x00, /* from tos_to_tid[0] = 1 */
75 0x01, /* from tos_to_tid[1] = 2 */
76 0x03,
77 0x04,
78 0x05,
79 0x06,
80 0x07};
81
82static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} }; 67static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
83 68
84/* 69/*
@@ -175,8 +160,15 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
175 break; 160 break;
176 161
177 ra_list->is_11n_enabled = 0; 162 ra_list->is_11n_enabled = 0;
163 ra_list->tdls_link = false;
178 if (!mwifiex_queuing_ra_based(priv)) { 164 if (!mwifiex_queuing_ra_based(priv)) {
179 ra_list->is_11n_enabled = IS_11N_ENABLED(priv); 165 if (mwifiex_get_tdls_link_status(priv, ra) ==
166 TDLS_SETUP_COMPLETE) {
167 ra_list->is_11n_enabled =
168 mwifiex_tdls_peer_11n_enabled(priv, ra);
169 } else {
170 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
171 }
180 } else { 172 } else {
181 ra_list->is_11n_enabled = 173 ra_list->is_11n_enabled =
182 mwifiex_is_sta_11n_enabled(priv, node); 174 mwifiex_is_sta_11n_enabled(priv, node);
@@ -213,8 +205,9 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
213 * This function map ACs to TIDs. 205 * This function map ACs to TIDs.
214 */ 206 */
215static void 207static void
216mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm) 208mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
217{ 209{
210 struct mwifiex_wmm_desc *wmm = &priv->wmm;
218 u8 *queue_priority = wmm->queue_priority; 211 u8 *queue_priority = wmm->queue_priority;
219 int i; 212 int i;
220 213
@@ -224,7 +217,7 @@ mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
224 } 217 }
225 218
226 for (i = 0; i < MAX_NUM_TID; ++i) 219 for (i = 0; i < MAX_NUM_TID; ++i)
227 tos_to_tid_inv[tos_to_tid[i]] = (u8)i; 220 priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
228 221
229 atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID); 222 atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
230} 223}
@@ -285,7 +278,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
285 } 278 }
286 } 279 }
287 280
288 mwifiex_wmm_queue_priorities_tid(&priv->wmm); 281 mwifiex_wmm_queue_priorities_tid(priv);
289} 282}
290 283
291/* 284/*
@@ -388,8 +381,7 @@ mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
388 * AP is disabled (due to call admission control (ACM bit). Mapping 381 * AP is disabled (due to call admission control (ACM bit). Mapping
389 * of TID to AC is taken care of internally. 382 * of TID to AC is taken care of internally.
390 */ 383 */
391static u8 384u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
392mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
393{ 385{
394 enum mwifiex_wmm_ac_e ac, ac_down; 386 enum mwifiex_wmm_ac_e ac, ac_down;
395 u8 new_tid; 387 u8 new_tid;
@@ -421,9 +413,17 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
421 continue; 413 continue;
422 414
423 for (i = 0; i < MAX_NUM_TID; ++i) { 415 for (i = 0; i < MAX_NUM_TID; ++i) {
424 priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i]; 416 if (!disable_tx_amsdu &&
425 priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i]; 417 adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
426 priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i]; 418 priv->aggr_prio_tbl[i].amsdu =
419 priv->tos_to_tid_inv[i];
420 else
421 priv->aggr_prio_tbl[i].amsdu =
422 BA_STREAM_NOT_ALLOWED;
423 priv->aggr_prio_tbl[i].ampdu_ap =
424 priv->tos_to_tid_inv[i];
425 priv->aggr_prio_tbl[i].ampdu_user =
426 priv->tos_to_tid_inv[i];
427 } 427 }
428 428
429 priv->aggr_prio_tbl[6].amsdu 429 priv->aggr_prio_tbl[6].amsdu
@@ -546,6 +546,7 @@ void
546mwifiex_clean_txrx(struct mwifiex_private *priv) 546mwifiex_clean_txrx(struct mwifiex_private *priv)
547{ 547{
548 unsigned long flags; 548 unsigned long flags;
549 struct sk_buff *skb, *tmp;
549 550
550 mwifiex_11n_cleanup_reorder_tbl(priv); 551 mwifiex_11n_cleanup_reorder_tbl(priv);
551 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); 552 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
@@ -563,6 +564,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
563 !priv->adapter->surprise_removed) 564 !priv->adapter->surprise_removed)
564 priv->adapter->if_ops.clean_pcie_ring(priv->adapter); 565 priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
565 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 566 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
567
568 skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
569 mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
566} 570}
567 571
568/* 572/*
@@ -591,7 +595,7 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
591 * If no such node is found, a new node is added first and then 595 * If no such node is found, a new node is added first and then
592 * retrieved. 596 * retrieved.
593 */ 597 */
594static struct mwifiex_ra_list_tbl * 598struct mwifiex_ra_list_tbl *
595mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr) 599mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
596{ 600{
597 struct mwifiex_ra_list_tbl *ra_list; 601 struct mwifiex_ra_list_tbl *ra_list;
@@ -641,6 +645,21 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
641 struct mwifiex_ra_list_tbl *ra_list; 645 struct mwifiex_ra_list_tbl *ra_list;
642 u8 ra[ETH_ALEN], tid_down; 646 u8 ra[ETH_ALEN], tid_down;
643 unsigned long flags; 647 unsigned long flags;
648 struct list_head list_head;
649 int tdls_status = TDLS_NOT_SETUP;
650 struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
651 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
652
653 memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
654
655 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
656 ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
657 if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
658 dev_dbg(adapter->dev,
659 "TDLS setup packet for %pM. Don't block\n", ra);
660 else
661 tdls_status = mwifiex_get_tdls_link_status(priv, ra);
662 }
644 663
645 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) { 664 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
646 dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); 665 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
@@ -659,12 +678,27 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
659 have only 1 raptr for a tid in case of infra */ 678 have only 1 raptr for a tid in case of infra */
660 if (!mwifiex_queuing_ra_based(priv) && 679 if (!mwifiex_queuing_ra_based(priv) &&
661 !mwifiex_is_skb_mgmt_frame(skb)) { 680 !mwifiex_is_skb_mgmt_frame(skb)) {
662 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list)) 681 switch (tdls_status) {
663 ra_list = list_first_entry( 682 case TDLS_SETUP_COMPLETE:
664 &priv->wmm.tid_tbl_ptr[tid_down].ra_list, 683 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
665 struct mwifiex_ra_list_tbl, list); 684 ra);
666 else 685 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
667 ra_list = NULL; 686 break;
687 case TDLS_SETUP_INPROGRESS:
688 skb_queue_tail(&priv->tdls_txq, skb);
689 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
690 flags);
691 return;
692 default:
693 list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
694 if (!list_empty(&list_head))
695 ra_list = list_first_entry(
696 &list_head, struct mwifiex_ra_list_tbl,
697 list);
698 else
699 ra_list = NULL;
700 break;
701 }
668 } else { 702 } else {
669 memcpy(ra, skb->data, ETH_ALEN); 703 memcpy(ra, skb->data, ETH_ALEN);
670 if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb)) 704 if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
@@ -684,9 +718,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
684 ra_list->total_pkt_count++; 718 ra_list->total_pkt_count++;
685 719
686 if (atomic_read(&priv->wmm.highest_queued_prio) < 720 if (atomic_read(&priv->wmm.highest_queued_prio) <
687 tos_to_tid_inv[tid_down]) 721 priv->tos_to_tid_inv[tid_down])
688 atomic_set(&priv->wmm.highest_queued_prio, 722 atomic_set(&priv->wmm.highest_queued_prio,
689 tos_to_tid_inv[tid_down]); 723 priv->tos_to_tid_inv[tid_down]);
690 724
691 atomic_inc(&priv->wmm.tx_pkts_queued); 725 atomic_inc(&priv->wmm.tx_pkts_queued);
692 726
@@ -1219,15 +1253,24 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1219 1253
1220 if (!ptr->is_11n_enabled || 1254 if (!ptr->is_11n_enabled ||
1221 mwifiex_is_ba_stream_setup(priv, ptr, tid) || 1255 mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
1222 priv->wps.session_enable || 1256 priv->wps.session_enable) {
1223 ((priv->sec_info.wpa_enabled || 1257 if (ptr->is_11n_enabled &&
1224 priv->sec_info.wpa2_enabled) && 1258 mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
1225 !priv->wpa_is_gtk_set)) { 1259 mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
1226 mwifiex_send_single_packet(priv, ptr, ptr_index, flags); 1260 mwifiex_is_amsdu_allowed(priv, tid) &&
1227 /* ra_list_spinlock has been freed in 1261 mwifiex_is_11n_aggragation_possible(priv, ptr,
1228 mwifiex_send_single_packet() */ 1262 adapter->tx_buf_size))
1263 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1264 /* ra_list_spinlock has been freed in
1265 * mwifiex_11n_aggregate_pkt()
1266 */
1267 else
1268 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1269 /* ra_list_spinlock has been freed in
1270 * mwifiex_send_single_packet()
1271 */
1229 } else { 1272 } else {
1230 if (mwifiex_is_ampdu_allowed(priv, tid) && 1273 if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
1231 ptr->ba_pkt_count > ptr->ba_packet_thr) { 1274 ptr->ba_pkt_count > ptr->ba_packet_thr) {
1232 if (mwifiex_space_avail_for_new_ba_stream(adapter)) { 1275 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1233 mwifiex_create_ba_tbl(priv, ptr->ra, tid, 1276 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
@@ -1240,7 +1283,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1240 mwifiex_send_delba(priv, tid_del, ra, 1); 1283 mwifiex_send_delba(priv, tid_del, ra, 1);
1241 } 1284 }
1242 } 1285 }
1243 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && 1286 if (mwifiex_is_amsdu_allowed(priv, tid) &&
1244 mwifiex_is_11n_aggragation_possible(priv, ptr, 1287 mwifiex_is_11n_aggragation_possible(priv, ptr,
1245 adapter->tx_buf_size)) 1288 adapter->tx_buf_size))
1246 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); 1289 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 0f129d498fb1..83e42083ebff 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -34,6 +34,21 @@ enum ieee_types_wmm_ecw_bitmasks {
34static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; 34static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
35 35
36/* 36/*
37 * This table inverses the tos_to_tid operation to get a priority
38 * which is in sequential order, and can be compared.
39 * Use this to compare the priority of two different TIDs.
40 */
41static const u8 tos_to_tid_inv[] = {
42 0x02, /* from tos_to_tid[2] = 0 */
43 0x00, /* from tos_to_tid[0] = 1 */
44 0x01, /* from tos_to_tid[1] = 2 */
45 0x03,
46 0x04,
47 0x05,
48 0x06,
49 0x07};
50
51/*
37 * This function retrieves the TID of the given RA list. 52 * This function retrieves the TID of the given RA list.
38 */ 53 */
39static inline int 54static inline int
@@ -107,5 +122,8 @@ void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
107void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv); 122void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
108int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, 123int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
109 const struct host_cmd_ds_command *resp); 124 const struct host_cmd_ds_command *resp);
125struct mwifiex_ra_list_tbl *
126mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
127u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
110 128
111#endif /* !_MWIFIEX_WMM_H_ */ 129#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 4987c3f942ce..3c0a0a86ba12 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -81,6 +81,9 @@ MODULE_PARM_DESC(ap_mode_default,
81 */ 81 */
82 82
83#define MWL8K_HW_TIMER_REGISTER 0x0000a600 83#define MWL8K_HW_TIMER_REGISTER 0x0000a600
84#define BBU_RXRDY_CNT_REG 0x0000a860
85#define NOK_CCA_CNT_REG 0x0000a6a0
86#define BBU_AVG_NOISE_VAL 0x67
84 87
85#define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \ 88#define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \
86 MWL8K_A2H_INT_CHNL_SWITCHED | \ 89 MWL8K_A2H_INT_CHNL_SWITCHED | \
@@ -112,6 +115,8 @@ MODULE_PARM_DESC(ap_mode_default,
112 */ 115 */
113#define MWL8K_NUM_AMPDU_STREAMS (TOTAL_HW_TX_QUEUES - 1) 116#define MWL8K_NUM_AMPDU_STREAMS (TOTAL_HW_TX_QUEUES - 1)
114 117
118#define MWL8K_NUM_CHANS 18
119
115struct rxd_ops { 120struct rxd_ops {
116 int rxd_size; 121 int rxd_size;
117 void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr); 122 void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
@@ -289,6 +294,12 @@ struct mwl8k_priv {
289 294
290 /* bitmap of running BSSes */ 295 /* bitmap of running BSSes */
291 u32 running_bsses; 296 u32 running_bsses;
297
298 /* ACS related */
299 bool sw_scan_start;
300 struct ieee80211_channel *acs_chan;
301 unsigned long channel_time;
302 struct survey_info survey[MWL8K_NUM_CHANS];
292}; 303};
293 304
294#define MAX_WEP_KEY_LEN 13 305#define MAX_WEP_KEY_LEN 13
@@ -396,6 +407,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
396#define MWL8K_CMD_SET_HW_SPEC 0x0004 407#define MWL8K_CMD_SET_HW_SPEC 0x0004
397#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010 408#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
398#define MWL8K_CMD_GET_STAT 0x0014 409#define MWL8K_CMD_GET_STAT 0x0014
410#define MWL8K_CMD_BBP_REG_ACCESS 0x001a
399#define MWL8K_CMD_RADIO_CONTROL 0x001c 411#define MWL8K_CMD_RADIO_CONTROL 0x001c
400#define MWL8K_CMD_RF_TX_POWER 0x001e 412#define MWL8K_CMD_RF_TX_POWER 0x001e
401#define MWL8K_CMD_TX_POWER 0x001f 413#define MWL8K_CMD_TX_POWER 0x001f
@@ -2987,6 +2999,47 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
2987} 2999}
2988 3000
2989/* 3001/*
3002 * CMD_BBP_REG_ACCESS.
3003 */
3004struct mwl8k_cmd_bbp_reg_access {
3005 struct mwl8k_cmd_pkt header;
3006 __le16 action;
3007 __le16 offset;
3008 u8 value;
3009 u8 rsrv[3];
3010} __packed;
3011
3012static int
3013mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw,
3014 u16 action,
3015 u16 offset,
3016 u8 *value)
3017{
3018 struct mwl8k_cmd_bbp_reg_access *cmd;
3019 int rc;
3020
3021 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3022 if (cmd == NULL)
3023 return -ENOMEM;
3024
3025 cmd->header.code = cpu_to_le16(MWL8K_CMD_BBP_REG_ACCESS);
3026 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3027 cmd->action = cpu_to_le16(action);
3028 cmd->offset = cpu_to_le16(offset);
3029
3030 rc = mwl8k_post_cmd(hw, &cmd->header);
3031
3032 if (!rc)
3033 *value = cmd->value;
3034 else
3035 *value = 0;
3036
3037 kfree(cmd);
3038
3039 return rc;
3040}
3041
3042/*
2990 * CMD_SET_POST_SCAN. 3043 * CMD_SET_POST_SCAN.
2991 */ 3044 */
2992struct mwl8k_cmd_set_post_scan { 3045struct mwl8k_cmd_set_post_scan {
@@ -3016,6 +3069,64 @@ mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
3016 return rc; 3069 return rc;
3017} 3070}
3018 3071
3072static int freq_to_idx(struct mwl8k_priv *priv, int freq)
3073{
3074 struct ieee80211_supported_band *sband;
3075 int band, ch, idx = 0;
3076
3077 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
3078 sband = priv->hw->wiphy->bands[band];
3079 if (!sband)
3080 continue;
3081
3082 for (ch = 0; ch < sband->n_channels; ch++, idx++)
3083 if (sband->channels[ch].center_freq == freq)
3084 goto exit;
3085 }
3086
3087exit:
3088 return idx;
3089}
3090
3091static void mwl8k_update_survey(struct mwl8k_priv *priv,
3092 struct ieee80211_channel *channel)
3093{
3094 u32 cca_cnt, rx_rdy;
3095 s8 nf = 0, idx;
3096 struct survey_info *survey;
3097
3098 idx = freq_to_idx(priv, priv->acs_chan->center_freq);
3099 if (idx >= MWL8K_NUM_CHANS) {
3100 wiphy_err(priv->hw->wiphy, "Failed to update survey\n");
3101 return;
3102 }
3103
3104 survey = &priv->survey[idx];
3105
3106 cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG);
3107 cca_cnt /= 1000; /* uSecs to mSecs */
3108 survey->channel_time_busy = (u64) cca_cnt;
3109
3110 rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG);
3111 rx_rdy /= 1000; /* uSecs to mSecs */
3112 survey->channel_time_rx = (u64) rx_rdy;
3113
3114 priv->channel_time = jiffies - priv->channel_time;
3115 survey->channel_time = jiffies_to_msecs(priv->channel_time);
3116
3117 survey->channel = channel;
3118
3119 mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &nf);
3120
3121 /* Make sure sign is negative else ACS at hostapd fails */
3122 survey->noise = nf * -1;
3123
3124 survey->filled = SURVEY_INFO_NOISE_DBM |
3125 SURVEY_INFO_CHANNEL_TIME |
3126 SURVEY_INFO_CHANNEL_TIME_BUSY |
3127 SURVEY_INFO_CHANNEL_TIME_RX;
3128}
3129
3019/* 3130/*
3020 * CMD_SET_RF_CHANNEL. 3131 * CMD_SET_RF_CHANNEL.
3021 */ 3132 */
@@ -3033,6 +3144,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
3033 enum nl80211_channel_type channel_type = 3144 enum nl80211_channel_type channel_type =
3034 cfg80211_get_chandef_type(&conf->chandef); 3145 cfg80211_get_chandef_type(&conf->chandef);
3035 struct mwl8k_cmd_set_rf_channel *cmd; 3146 struct mwl8k_cmd_set_rf_channel *cmd;
3147 struct mwl8k_priv *priv = hw->priv;
3036 int rc; 3148 int rc;
3037 3149
3038 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 3150 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -3049,13 +3161,29 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
3049 else if (channel->band == IEEE80211_BAND_5GHZ) 3161 else if (channel->band == IEEE80211_BAND_5GHZ)
3050 cmd->channel_flags |= cpu_to_le32(0x00000004); 3162 cmd->channel_flags |= cpu_to_le32(0x00000004);
3051 3163
3052 if (channel_type == NL80211_CHAN_NO_HT || 3164 if (!priv->sw_scan_start) {
3053 channel_type == NL80211_CHAN_HT20) 3165 if (channel_type == NL80211_CHAN_NO_HT ||
3166 channel_type == NL80211_CHAN_HT20)
3167 cmd->channel_flags |= cpu_to_le32(0x00000080);
3168 else if (channel_type == NL80211_CHAN_HT40MINUS)
3169 cmd->channel_flags |= cpu_to_le32(0x000001900);
3170 else if (channel_type == NL80211_CHAN_HT40PLUS)
3171 cmd->channel_flags |= cpu_to_le32(0x000000900);
3172 } else {
3054 cmd->channel_flags |= cpu_to_le32(0x00000080); 3173 cmd->channel_flags |= cpu_to_le32(0x00000080);
3055 else if (channel_type == NL80211_CHAN_HT40MINUS) 3174 }
3056 cmd->channel_flags |= cpu_to_le32(0x000001900); 3175
3057 else if (channel_type == NL80211_CHAN_HT40PLUS) 3176 if (priv->sw_scan_start) {
3058 cmd->channel_flags |= cpu_to_le32(0x000000900); 3177 /* Store current channel stats
3178 * before switching to newer one.
3179 * This will be processed only for AP fw.
3180 */
3181 if (priv->channel_time != 0)
3182 mwl8k_update_survey(priv, priv->acs_chan);
3183
3184 priv->channel_time = jiffies;
3185 priv->acs_chan = channel;
3186 }
3059 3187
3060 rc = mwl8k_post_cmd(hw, &cmd->header); 3188 rc = mwl8k_post_cmd(hw, &cmd->header);
3061 kfree(cmd); 3189 kfree(cmd);
@@ -5263,6 +5391,27 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
5263{ 5391{
5264 struct mwl8k_priv *priv = hw->priv; 5392 struct mwl8k_priv *priv = hw->priv;
5265 struct ieee80211_conf *conf = &hw->conf; 5393 struct ieee80211_conf *conf = &hw->conf;
5394 struct ieee80211_supported_band *sband;
5395
5396 if (priv->ap_fw) {
5397 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
5398
5399 if (sband && idx >= sband->n_channels) {
5400 idx -= sband->n_channels;
5401 sband = NULL;
5402 }
5403
5404 if (!sband)
5405 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
5406
5407 if (!sband || idx >= sband->n_channels)
5408 return -ENOENT;
5409
5410 memcpy(survey, &priv->survey[idx], sizeof(*survey));
5411 survey->channel = &sband->channels[idx];
5412
5413 return 0;
5414 }
5266 5415
5267 if (idx != 0) 5416 if (idx != 0)
5268 return -ENOENT; 5417 return -ENOENT;
@@ -5406,6 +5555,40 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5406 return rc; 5555 return rc;
5407} 5556}
5408 5557
5558static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
5559{
5560 struct mwl8k_priv *priv = hw->priv;
5561 u8 tmp;
5562
5563 if (!priv->ap_fw)
5564 return;
5565
5566 /* clear all stats */
5567 priv->channel_time = 0;
5568 ioread32(priv->regs + BBU_RXRDY_CNT_REG);
5569 ioread32(priv->regs + NOK_CCA_CNT_REG);
5570 mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
5571
5572 priv->sw_scan_start = true;
5573}
5574
5575static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw)
5576{
5577 struct mwl8k_priv *priv = hw->priv;
5578 u8 tmp;
5579
5580 if (!priv->ap_fw)
5581 return;
5582
5583 priv->sw_scan_start = false;
5584
5585 /* clear all stats */
5586 priv->channel_time = 0;
5587 ioread32(priv->regs + BBU_RXRDY_CNT_REG);
5588 ioread32(priv->regs + NOK_CCA_CNT_REG);
5589 mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
5590}
5591
5409static const struct ieee80211_ops mwl8k_ops = { 5592static const struct ieee80211_ops mwl8k_ops = {
5410 .tx = mwl8k_tx, 5593 .tx = mwl8k_tx,
5411 .start = mwl8k_start, 5594 .start = mwl8k_start,
@@ -5424,6 +5607,8 @@ static const struct ieee80211_ops mwl8k_ops = {
5424 .get_stats = mwl8k_get_stats, 5607 .get_stats = mwl8k_get_stats,
5425 .get_survey = mwl8k_get_survey, 5608 .get_survey = mwl8k_get_survey,
5426 .ampdu_action = mwl8k_ampdu_action, 5609 .ampdu_action = mwl8k_ampdu_action,
5610 .sw_scan_start = mwl8k_sw_scan_start,
5611 .sw_scan_complete = mwl8k_sw_scan_complete,
5427}; 5612};
5428 5613
5429static void mwl8k_finalize_join_worker(struct work_struct *work) 5614static void mwl8k_finalize_join_worker(struct work_struct *work)
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index d01edd2c50c5..a9e94b6db5b7 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -59,7 +59,8 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
59 for (i = 0; i < NUM_CHANNELS; i++) { 59 for (i = 0; i < NUM_CHANNELS; i++) {
60 if (priv->channel_mask & (1 << i)) { 60 if (priv->channel_mask & (1 << i)) {
61 priv->channels[i].center_freq = 61 priv->channels[i].center_freq =
62 ieee80211_dsss_chan_to_freq(i + 1); 62 ieee80211_channel_to_frequency(i + 1,
63 IEEE80211_BAND_2GHZ);
63 channels++; 64 channels++;
64 } 65 }
65 } 66 }
@@ -177,7 +178,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
177 if (chandef->chan->band != IEEE80211_BAND_2GHZ) 178 if (chandef->chan->band != IEEE80211_BAND_2GHZ)
178 return -EINVAL; 179 return -EINVAL;
179 180
180 channel = ieee80211_freq_to_dsss_chan(chandef->chan->center_freq); 181 channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
181 182
182 if ((channel < 1) || (channel > NUM_CHANNELS) || 183 if ((channel < 1) || (channel > NUM_CHANNELS) ||
183 !(priv->channel_mask & (1 << (channel - 1)))) 184 !(priv->channel_mask & (1 << (channel - 1))))
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index c09c8437c0b8..49300d04efdf 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
1193 goto out; 1193 goto out;
1194 1194
1195 } 1195 }
1196 freq = ieee80211_dsss_chan_to_freq(channel); 1196 freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
1197 1197
1198 out: 1198 out:
1199 orinoco_unlock(priv, &flags); 1199 orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e8c5714bfd11..e175b9b8561b 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -110,7 +110,8 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
110 break; 110 break;
111 } 111 }
112 112
113 freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel)); 113 freq = ieee80211_channel_to_frequency(
114 le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
114 channel = ieee80211_get_channel(wiphy, freq); 115 channel = ieee80211_get_channel(wiphy, freq);
115 if (!channel) { 116 if (!channel) {
116 printk(KERN_DEBUG "Invalid channel designation %04X(%04X)", 117 printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -146,7 +147,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
146 ie_len = len - sizeof(*bss); 147 ie_len = len - sizeof(*bss);
147 ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len); 148 ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
148 chan = ie ? ie[2] : 0; 149 chan = ie ? ie[2] : 0;
149 freq = ieee80211_dsss_chan_to_freq(chan); 150 freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
150 channel = ieee80211_get_channel(wiphy, freq); 151 channel = ieee80211_get_channel(wiphy, freq);
151 152
152 timestamp = le64_to_cpu(bss->timestamp); 153 timestamp = le64_to_cpu(bss->timestamp);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 3b5508f982e8..b7a867b50b94 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -444,7 +444,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
444 for (i = 0; i < (6 - frq->e); i++) 444 for (i = 0; i < (6 - frq->e); i++)
445 denom *= 10; 445 denom *= 10;
446 446
447 chan = ieee80211_freq_to_dsss_chan(frq->m / denom); 447 chan = ieee80211_frequency_to_channel(frq->m / denom);
448 } 448 }
449 449
450 if ((chan < 1) || (chan > NUM_CHANNELS) || 450 if ((chan < 1) || (chan > NUM_CHANNELS) ||
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 6e635cfa24c8..043bd1c23c19 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -513,7 +513,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
513 if (!buf) 513 if (!buf)
514 return -ENOMEM; 514 return -ENOMEM;
515 515
516 left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size); 516 left = block_size = min_t(size_t, P54U_FW_BLOCK, priv->fw->size);
517 strcpy(buf, p54u_firmware_upload_3887); 517 strcpy(buf, p54u_firmware_upload_3887);
518 left -= strlen(p54u_firmware_upload_3887); 518 left -= strlen(p54u_firmware_upload_3887);
519 tmp += strlen(p54u_firmware_upload_3887); 519 tmp += strlen(p54u_firmware_upload_3887);
@@ -1053,6 +1053,10 @@ static int p54u_probe(struct usb_interface *intf,
1053 priv->upload_fw = p54u_upload_firmware_net2280; 1053 priv->upload_fw = p54u_upload_firmware_net2280;
1054 } 1054 }
1055 err = p54u_load_firmware(dev, intf); 1055 err = p54u_load_firmware(dev, intf);
1056 if (err) {
1057 usb_put_dev(udev);
1058 p54_free_common(dev);
1059 }
1056 return err; 1060 return err;
1057} 1061}
1058 1062
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 78fa64d3f223..ecbb0546cf3e 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -644,7 +644,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
644 wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie); 644 wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
645 if (wpa_ie_len > 0) { 645 if (wpa_ie_len > 0) {
646 iwe.cmd = IWEVGENIE; 646 iwe.cmd = IWEVGENIE;
647 iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN); 647 iwe.u.data.length = min_t(size_t, wpa_ie_len, MAX_WPA_IE_LEN);
648 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 648 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
649 &iwe, wpa_ie); 649 &iwe, wpa_ie);
650 } 650 }
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 5028557aa18a..39d22a154341 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1290,7 +1290,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
1290 if (is_associated(usbdev)) 1290 if (is_associated(usbdev))
1291 return 0; 1291 return 0;
1292 1292
1293 dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000; 1293 dsconfig = 1000 *
1294 ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
1294 1295
1295 len = sizeof(config); 1296 len = sizeof(config);
1296 ret = rndis_query_oid(usbdev, 1297 ret = rndis_query_oid(usbdev,
@@ -2835,7 +2836,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2835 bssid, req_ie, req_ie_len, 2836 bssid, req_ie, req_ie_len,
2836 resp_ie, resp_ie_len, GFP_KERNEL); 2837 resp_ie, resp_ie_len, GFP_KERNEL);
2837 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2838 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2838 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2839 cfg80211_ibss_joined(usbdev->net, bssid,
2840 get_current_channel(usbdev, NULL),
2841 GFP_KERNEL);
2839 2842
2840 kfree(info); 2843 kfree(info);
2841 2844
diff --git a/drivers/net/wireless/rsi/Kconfig b/drivers/net/wireless/rsi/Kconfig
new file mode 100644
index 000000000000..35245f994c10
--- /dev/null
+++ b/drivers/net/wireless/rsi/Kconfig
@@ -0,0 +1,30 @@
1config RSI_91X
2 tristate "Redpine Signals Inc 91x WLAN driver support"
3 depends on MAC80211
4 ---help---
5 This option enables support for RSI 1x1 devices.
6 Select M (recommended), if you have a RSI 1x1 wireless module.
7
8config RSI_DEBUGFS
9 bool "Redpine Signals Inc debug support"
10 depends on RSI_91X
11 default y
12 ---help---
13 Say Y, if you would like to enable debug support. This option
14 creates debugfs entries
15
16config RSI_SDIO
17 tristate "Redpine Signals SDIO bus support"
18 depends on MMC && RSI_91X
19 default m
20 ---help---
21 This option enables the SDIO bus support in rsi drivers.
22 Select M (recommended), if you have a RSI 1x1 wireless module.
23
24config RSI_USB
25 tristate "Redpine Signals USB bus support"
26 depends on USB && RSI_91X
27 default m
28 ---help---
29 This option enables the USB bus support in rsi drivers.
30 Select M (recommended), if you have a RSI 1x1 wireless module.
diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile
new file mode 100644
index 000000000000..25828b692756
--- /dev/null
+++ b/drivers/net/wireless/rsi/Makefile
@@ -0,0 +1,12 @@
1rsi_91x-y += rsi_91x_main.o
2rsi_91x-y += rsi_91x_core.o
3rsi_91x-y += rsi_91x_mac80211.o
4rsi_91x-y += rsi_91x_mgmt.o
5rsi_91x-y += rsi_91x_pkt.o
6rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o
7
8rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o
9rsi_sdio-y += rsi_91x_sdio.o rsi_91x_sdio_ops.o
10obj-$(CONFIG_RSI_91X) += rsi_91x.o
11obj-$(CONFIG_RSI_SDIO) += rsi_sdio.o
12obj-$(CONFIG_RSI_USB) += rsi_usb.o
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
new file mode 100644
index 000000000000..e89535e86caf
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -0,0 +1,342 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "rsi_mgmt.h"
18#include "rsi_common.h"
19
20/**
21 * rsi_determine_min_weight_queue() - This function determines the queue with
22 * the min weight.
23 * @common: Pointer to the driver private structure.
24 *
25 * Return: q_num: Corresponding queue number.
26 */
27static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
28{
29 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
30 u32 q_len = 0;
31 u8 ii = 0;
32
33 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
34 q_len = skb_queue_len(&common->tx_queue[ii]);
35 if ((tx_qinfo[ii].pkt_contended) && q_len) {
36 common->min_weight = tx_qinfo[ii].weight;
37 break;
38 }
39 }
40 return ii;
41}
42
43/**
44 * rsi_recalculate_weights() - This function recalculates the weights
45 * corresponding to each queue.
46 * @common: Pointer to the driver private structure.
47 *
48 * Return: recontend_queue bool variable
49 */
50static bool rsi_recalculate_weights(struct rsi_common *common)
51{
52 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
53 bool recontend_queue = false;
54 u8 ii = 0;
55 u32 q_len = 0;
56
57 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
58 q_len = skb_queue_len(&common->tx_queue[ii]);
59 /* Check for the need of contention */
60 if (q_len) {
61 if (tx_qinfo[ii].pkt_contended) {
62 tx_qinfo[ii].weight =
63 ((tx_qinfo[ii].weight > common->min_weight) ?
64 tx_qinfo[ii].weight - common->min_weight : 0);
65 } else {
66 tx_qinfo[ii].pkt_contended = 1;
67 tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
68 recontend_queue = true;
69 }
70 } else { /* No packets so no contention */
71 tx_qinfo[ii].weight = 0;
72 tx_qinfo[ii].pkt_contended = 0;
73 }
74 }
75
76 return recontend_queue;
77}
78
79/**
80 * rsi_core_determine_hal_queue() - This function determines the queue from
81 * which packet has to be dequeued.
82 * @common: Pointer to the driver private structure.
83 *
84 * Return: q_num: Corresponding queue number on success.
85 */
86static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
87{
88 bool recontend_queue = false;
89 u32 q_len = 0;
90 u8 q_num = INVALID_QUEUE;
91 u8 ii, min = 0;
92
93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
94 if (!common->mgmt_q_block)
95 q_num = MGMT_SOFT_Q;
96 return q_num;
97 }
98
99 if (common->pkt_cnt != 0) {
100 --common->pkt_cnt;
101 return common->selected_qnum;
102 }
103
104get_queue_num:
105 q_num = 0;
106 recontend_queue = false;
107
108 q_num = rsi_determine_min_weight_queue(common);
109 q_len = skb_queue_len(&common->tx_queue[ii]);
110 ii = q_num;
111
112 /* Selecting the queue with least back off */
113 for (; ii < NUM_EDCA_QUEUES; ii++) {
114 if (((common->tx_qinfo[ii].pkt_contended) &&
115 (common->tx_qinfo[ii].weight < min)) && q_len) {
116 min = common->tx_qinfo[ii].weight;
117 q_num = ii;
118 }
119 }
120
121 common->tx_qinfo[q_num].pkt_contended = 0;
122 /* Adjust the back off values for all queues again */
123 recontend_queue = rsi_recalculate_weights(common);
124
125 q_len = skb_queue_len(&common->tx_queue[q_num]);
126 if (!q_len) {
127 /* If any queues are freshly contended and the selected queue
128 * doesn't have any packets
129 * then get the queue number again with fresh values
130 */
131 if (recontend_queue)
132 goto get_queue_num;
133
134 q_num = INVALID_QUEUE;
135 return q_num;
136 }
137
138 common->selected_qnum = q_num;
139 q_len = skb_queue_len(&common->tx_queue[q_num]);
140
141 switch (common->selected_qnum) {
142 case VO_Q:
143 if (q_len > MAX_CONTINUOUS_VO_PKTS)
144 common->pkt_cnt = (MAX_CONTINUOUS_VO_PKTS - 1);
145 else
146 common->pkt_cnt = --q_len;
147 break;
148
149 case VI_Q:
150 if (q_len > MAX_CONTINUOUS_VI_PKTS)
151 common->pkt_cnt = (MAX_CONTINUOUS_VI_PKTS - 1);
152 else
153 common->pkt_cnt = --q_len;
154
155 break;
156
157 default:
158 common->pkt_cnt = 0;
159 break;
160 }
161
162 return q_num;
163}
164
165/**
166 * rsi_core_queue_pkt() - This functions enqueues the packet to the queue
167 * specified by the queue number.
168 * @common: Pointer to the driver private structure.
169 * @skb: Pointer to the socket buffer structure.
170 *
171 * Return: None.
172 */
173static void rsi_core_queue_pkt(struct rsi_common *common,
174 struct sk_buff *skb)
175{
176 u8 q_num = skb->priority;
177 if (q_num >= NUM_SOFT_QUEUES) {
178 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
179 __func__, q_num);
180 dev_kfree_skb(skb);
181 return;
182 }
183
184 skb_queue_tail(&common->tx_queue[q_num], skb);
185}
186
187/**
188 * rsi_core_dequeue_pkt() - This functions dequeues the packet from the queue
189 * specified by the queue number.
190 * @common: Pointer to the driver private structure.
191 * @q_num: Queue number.
192 *
193 * Return: Pointer to sk_buff structure.
194 */
195static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
196 u8 q_num)
197{
198 if (q_num >= NUM_SOFT_QUEUES) {
199 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
200 __func__, q_num);
201 return NULL;
202 }
203
204 return skb_dequeue(&common->tx_queue[q_num]);
205}
206
207/**
208 * rsi_core_qos_processor() - This function is used to determine the wmm queue
209 * based on the backoff procedure. Data packets are
210 * dequeued from the selected hal queue and sent to
211 * the below layers.
212 * @common: Pointer to the driver private structure.
213 *
214 * Return: None.
215 */
216void rsi_core_qos_processor(struct rsi_common *common)
217{
218 struct rsi_hw *adapter = common->priv;
219 struct sk_buff *skb;
220 unsigned long tstamp_1, tstamp_2;
221 u8 q_num;
222 int status;
223
224 tstamp_1 = jiffies;
225 while (1) {
226 q_num = rsi_core_determine_hal_queue(common);
227 rsi_dbg(DATA_TX_ZONE,
228 "%s: Queue number = %d\n", __func__, q_num);
229
230 if (q_num == INVALID_QUEUE) {
231 rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
232 break;
233 }
234
235 mutex_lock(&common->tx_rxlock);
236
237 status = adapter->check_hw_queue_status(adapter, q_num);
238 if ((status <= 0)) {
239 mutex_unlock(&common->tx_rxlock);
240 break;
241 }
242
243 if ((q_num < MGMT_SOFT_Q) &&
244 ((skb_queue_len(&common->tx_queue[q_num])) <=
245 MIN_DATA_QUEUE_WATER_MARK)) {
246 if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
247 ieee80211_wake_queue(adapter->hw,
248 WME_AC(q_num));
249 }
250
251 skb = rsi_core_dequeue_pkt(common, q_num);
252 if (skb == NULL) {
253 mutex_unlock(&common->tx_rxlock);
254 break;
255 }
256
257 if (q_num == MGMT_SOFT_Q)
258 status = rsi_send_mgmt_pkt(common, skb);
259 else
260 status = rsi_send_data_pkt(common, skb);
261
262 if (status) {
263 mutex_unlock(&common->tx_rxlock);
264 break;
265 }
266
267 common->tx_stats.total_tx_pkt_send[q_num]++;
268
269 tstamp_2 = jiffies;
270 mutex_unlock(&common->tx_rxlock);
271
272 if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
273 schedule();
274 }
275}
276
277/**
278 * rsi_core_xmit() - This function transmits the packets received from mac80211
279 * @common: Pointer to the driver private structure.
280 * @skb: Pointer to the socket buffer structure.
281 *
282 * Return: None.
283 */
284void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
285{
286 struct rsi_hw *adapter = common->priv;
287 struct ieee80211_tx_info *info;
288 struct skb_info *tx_params;
289 struct ieee80211_hdr *tmp_hdr = NULL;
290 u8 q_num, tid = 0;
291
292 if ((!skb) || (!skb->len)) {
293 rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
294 __func__);
295 goto xmit_fail;
296 }
297 info = IEEE80211_SKB_CB(skb);
298 tx_params = (struct skb_info *)info->driver_data;
299 tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
300
301 if (common->fsm_state != FSM_MAC_INIT_DONE) {
302 rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
303 goto xmit_fail;
304 }
305
306 if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
307 (ieee80211_is_ctl(tmp_hdr->frame_control))) {
308 q_num = MGMT_SOFT_Q;
309 skb->priority = q_num;
310 } else {
311 if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
312 tid = (skb->data[24] & IEEE80211_QOS_TID);
313 skb->priority = TID_TO_WME_AC(tid);
314 } else {
315 tid = IEEE80211_NONQOS_TID;
316 skb->priority = BE_Q;
317 }
318 q_num = skb->priority;
319 tx_params->tid = tid;
320 tx_params->sta_id = 0;
321 }
322
323 if ((q_num != MGMT_SOFT_Q) &&
324 ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
325 DATA_QUEUE_WATER_MARK)) {
326 if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
327 ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
328 rsi_set_event(&common->tx_thread.event);
329 goto xmit_fail;
330 }
331
332 rsi_core_queue_pkt(common, skb);
333 rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thead <===\n", __func__);
334 rsi_set_event(&common->tx_thread.event);
335
336 return;
337
338xmit_fail:
339 rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
340 /* Dropping pkt here */
341 ieee80211_free_txskb(common->priv->hw, skb);
342}
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
new file mode 100644
index 000000000000..7e4ef4554411
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -0,0 +1,339 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "rsi_debugfs.h"
18#include "rsi_sdio.h"
19
20/**
21 * rsi_sdio_stats_read() - This function returns the sdio status of the driver.
22 * @seq: Pointer to the sequence file structure.
23 * @data: Pointer to the data.
24 *
25 * Return: 0 on success, -1 on failure.
26 */
27static int rsi_sdio_stats_read(struct seq_file *seq, void *data)
28{
29 struct rsi_common *common = seq->private;
30 struct rsi_hw *adapter = common->priv;
31 struct rsi_91x_sdiodev *dev =
32 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
33
34 seq_printf(seq, "total_sdio_interrupts: %d\n",
35 dev->rx_info.sdio_int_counter);
36 seq_printf(seq, "sdio_msdu_pending_intr_count: %d\n",
37 dev->rx_info.total_sdio_msdu_pending_intr);
38 seq_printf(seq, "sdio_buff_full_count : %d\n",
39 dev->rx_info.buf_full_counter);
40 seq_printf(seq, "sdio_buf_semi_full_count %d\n",
41 dev->rx_info.buf_semi_full_counter);
42 seq_printf(seq, "sdio_unknown_intr_count: %d\n",
43 dev->rx_info.total_sdio_unknown_intr);
44 /* RX Path Stats */
45 seq_printf(seq, "BUFFER FULL STATUS : %d\n",
46 dev->rx_info.buffer_full);
47 seq_printf(seq, "SEMI BUFFER FULL STATUS : %d\n",
48 dev->rx_info.semi_buffer_full);
49 seq_printf(seq, "MGMT BUFFER FULL STATUS : %d\n",
50 dev->rx_info.mgmt_buffer_full);
51 seq_printf(seq, "BUFFER FULL COUNTER : %d\n",
52 dev->rx_info.buf_full_counter);
53 seq_printf(seq, "BUFFER SEMI FULL COUNTER : %d\n",
54 dev->rx_info.buf_semi_full_counter);
55 seq_printf(seq, "MGMT BUFFER FULL COUNTER : %d\n",
56 dev->rx_info.mgmt_buf_full_counter);
57
58 return 0;
59}
60
61/**
62 * rsi_sdio_stats_open() - This funtion calls single open function of seq_file
63 * to open file and read contents from it.
64 * @inode: Pointer to the inode structure.
65 * @file: Pointer to the file structure.
66 *
67 * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
68 */
69static int rsi_sdio_stats_open(struct inode *inode,
70 struct file *file)
71{
72 return single_open(file, rsi_sdio_stats_read, inode->i_private);
73}
74
75/**
76 * rsi_version_read() - This function gives driver and firmware version number.
77 * @seq: Pointer to the sequence file structure.
78 * @data: Pointer to the data.
79 *
80 * Return: 0 on success, -1 on failure.
81 */
82static int rsi_version_read(struct seq_file *seq, void *data)
83{
84 struct rsi_common *common = seq->private;
85
86 common->driver_ver.major = 0;
87 common->driver_ver.minor = 1;
88 common->driver_ver.release_num = 0;
89 common->driver_ver.patch_num = 0;
90 seq_printf(seq, "Driver : %x.%d.%d.%d\nLMAC : %d.%d.%d.%d\n",
91 common->driver_ver.major,
92 common->driver_ver.minor,
93 common->driver_ver.release_num,
94 common->driver_ver.patch_num,
95 common->fw_ver.major,
96 common->fw_ver.minor,
97 common->fw_ver.release_num,
98 common->fw_ver.patch_num);
99 return 0;
100}
101
102/**
103 * rsi_version_open() - This funtion calls single open function of seq_file to
104 * open file and read contents from it.
105 * @inode: Pointer to the inode structure.
106 * @file: Pointer to the file structure.
107 *
108 * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
109 */
110static int rsi_version_open(struct inode *inode,
111 struct file *file)
112{
113 return single_open(file, rsi_version_read, inode->i_private);
114}
115
116/**
117 * rsi_stats_read() - This function return the status of the driver.
118 * @seq: Pointer to the sequence file structure.
119 * @data: Pointer to the data.
120 *
121 * Return: 0 on success, -1 on failure.
122 */
123static int rsi_stats_read(struct seq_file *seq, void *data)
124{
125 struct rsi_common *common = seq->private;
126
127 unsigned char fsm_state[][32] = {
128 "FSM_CARD_NOT_READY",
129 "FSM_BOOT_PARAMS_SENT",
130 "FSM_EEPROM_READ_MAC_ADDR",
131 "FSM_RESET_MAC_SENT",
132 "FSM_RADIO_CAPS_SENT",
133 "FSM_BB_RF_PROG_SENT",
134 "FSM_MAC_INIT_DONE"
135 };
136 seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n");
137 seq_puts(seq, "DRIVER_FSM_STATE: ");
138
139 if (common->fsm_state <= FSM_MAC_INIT_DONE)
140 seq_printf(seq, "%s", fsm_state[common->fsm_state]);
141
142 seq_printf(seq, "(%d)\n\n", common->fsm_state);
143
144 /* Mgmt TX Path Stats */
145 seq_printf(seq, "total_mgmt_pkt_send : %d\n",
146 common->tx_stats.total_tx_pkt_send[MGMT_SOFT_Q]);
147 seq_printf(seq, "total_mgmt_pkt_queued : %d\n",
148 skb_queue_len(&common->tx_queue[4]));
149 seq_printf(seq, "total_mgmt_pkt_freed : %d\n",
150 common->tx_stats.total_tx_pkt_freed[MGMT_SOFT_Q]);
151
152 /* Data TX Path Stats */
153 seq_printf(seq, "total_data_vo_pkt_send: %8d\t",
154 common->tx_stats.total_tx_pkt_send[VO_Q]);
155 seq_printf(seq, "total_data_vo_pkt_queued: %8d\t",
156 skb_queue_len(&common->tx_queue[0]));
157 seq_printf(seq, "total_vo_pkt_freed: %8d\n",
158 common->tx_stats.total_tx_pkt_freed[VO_Q]);
159 seq_printf(seq, "total_data_vi_pkt_send: %8d\t",
160 common->tx_stats.total_tx_pkt_send[VI_Q]);
161 seq_printf(seq, "total_data_vi_pkt_queued: %8d\t",
162 skb_queue_len(&common->tx_queue[1]));
163 seq_printf(seq, "total_vi_pkt_freed: %8d\n",
164 common->tx_stats.total_tx_pkt_freed[VI_Q]);
165 seq_printf(seq, "total_data_be_pkt_send: %8d\t",
166 common->tx_stats.total_tx_pkt_send[BE_Q]);
167 seq_printf(seq, "total_data_be_pkt_queued: %8d\t",
168 skb_queue_len(&common->tx_queue[2]));
169 seq_printf(seq, "total_be_pkt_freed: %8d\n",
170 common->tx_stats.total_tx_pkt_freed[BE_Q]);
171 seq_printf(seq, "total_data_bk_pkt_send: %8d\t",
172 common->tx_stats.total_tx_pkt_send[BK_Q]);
173 seq_printf(seq, "total_data_bk_pkt_queued: %8d\t",
174 skb_queue_len(&common->tx_queue[3]));
175 seq_printf(seq, "total_bk_pkt_freed: %8d\n",
176 common->tx_stats.total_tx_pkt_freed[BK_Q]);
177
178 seq_puts(seq, "\n");
179 return 0;
180}
181
182/**
183 * rsi_stats_open() - This funtion calls single open function of seq_file to
184 * open file and read contents from it.
185 * @inode: Pointer to the inode structure.
186 * @file: Pointer to the file structure.
187 *
188 * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
189 */
190static int rsi_stats_open(struct inode *inode,
191 struct file *file)
192{
193 return single_open(file, rsi_stats_read, inode->i_private);
194}
195
196/**
197 * rsi_debug_zone_read() - This function display the currently enabled debug zones.
198 * @seq: Pointer to the sequence file structure.
199 * @data: Pointer to the data.
200 *
201 * Return: 0 on success, -1 on failure.
202 */
203static int rsi_debug_zone_read(struct seq_file *seq, void *data)
204{
205 rsi_dbg(FSM_ZONE, "%x: rsi_enabled zone", rsi_zone_enabled);
206 seq_printf(seq, "The zones available are %#x\n",
207 rsi_zone_enabled);
208 return 0;
209}
210
211/**
212 * rsi_debug_read() - This funtion calls single open function of seq_file to
213 * open file and read contents from it.
214 * @inode: Pointer to the inode structure.
215 * @file: Pointer to the file structure.
216 *
217 * Return: Pointer to the opened file status: 0 on success, ENOMEM on failure.
218 */
219static int rsi_debug_read(struct inode *inode,
220 struct file *file)
221{
222 return single_open(file, rsi_debug_zone_read, inode->i_private);
223}
224
225/**
226 * rsi_debug_zone_write() - This function writes into hal queues as per user
227 * requirement.
228 * @filp: Pointer to the file structure.
229 * @buff: Pointer to the character buffer.
230 * @len: Length of the data to be written into buffer.
231 * @data: Pointer to the data.
232 *
233 * Return: len: Number of bytes read.
234 */
235static ssize_t rsi_debug_zone_write(struct file *filp,
236 const char __user *buff,
237 size_t len,
238 loff_t *data)
239{
240 unsigned long dbg_zone;
241 int ret;
242
243 if (!len)
244 return 0;
245
246 ret = kstrtoul_from_user(buff, len, 16, &dbg_zone);
247
248 if (ret)
249 return ret;
250
251 rsi_zone_enabled = dbg_zone;
252 return len;
253}
254
255#define FOPS(fopen) { \
256 .owner = THIS_MODULE, \
257 .open = (fopen), \
258 .read = seq_read, \
259 .llseek = seq_lseek, \
260}
261
262#define FOPS_RW(fopen, fwrite) { \
263 .owner = THIS_MODULE, \
264 .open = (fopen), \
265 .read = seq_read, \
266 .llseek = seq_lseek, \
267 .write = (fwrite), \
268}
269
270static const struct rsi_dbg_files dev_debugfs_files[] = {
271 {"version", 0644, FOPS(rsi_version_open),},
272 {"stats", 0644, FOPS(rsi_stats_open),},
273 {"debug_zone", 0666, FOPS_RW(rsi_debug_read, rsi_debug_zone_write),},
274 {"sdio_stats", 0644, FOPS(rsi_sdio_stats_open),},
275};
276
277/**
278 * rsi_init_dbgfs() - This function initializes the dbgfs entry.
279 * @adapter: Pointer to the adapter structure.
280 *
281 * Return: 0 on success, -1 on failure.
282 */
283int rsi_init_dbgfs(struct rsi_hw *adapter)
284{
285 struct rsi_common *common = adapter->priv;
286 struct rsi_debugfs *dev_dbgfs;
287 char devdir[6];
288 int ii;
289 const struct rsi_dbg_files *files;
290
291 dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
292 adapter->dfsentry = dev_dbgfs;
293
294 snprintf(devdir, sizeof(devdir), "%s",
295 wiphy_name(adapter->hw->wiphy));
296 dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
297
298 if (IS_ERR(dev_dbgfs->subdir)) {
299 if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
300 rsi_dbg(ERR_ZONE,
301 "%s:Debugfs has not been mounted\n", __func__);
302 else
303 rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
304
305 adapter->dfsentry = NULL;
306 kfree(dev_dbgfs);
307 return (int)PTR_ERR(dev_dbgfs->subdir);
308 } else {
309 for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
310 files = &dev_debugfs_files[ii];
311 dev_dbgfs->rsi_files[ii] =
312 debugfs_create_file(files->name,
313 files->perms,
314 dev_dbgfs->subdir,
315 common,
316 &files->fops);
317 }
318 }
319 return 0;
320}
321EXPORT_SYMBOL_GPL(rsi_init_dbgfs);
322
323/**
324 * rsi_remove_dbgfs() - Removes the previously created dbgfs file entries
325 * in the reverse order of creation.
326 * @adapter: Pointer to the adapter structure.
327 *
328 * Return: None.
329 */
330void rsi_remove_dbgfs(struct rsi_hw *adapter)
331{
332 struct rsi_debugfs *dev_dbgfs = adapter->dfsentry;
333
334 if (!dev_dbgfs)
335 return;
336
337 debugfs_remove_recursive(dev_dbgfs->subdir);
338}
339EXPORT_SYMBOL_GPL(rsi_remove_dbgfs);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
new file mode 100644
index 000000000000..84164747ace0
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -0,0 +1,1008 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/etherdevice.h>
18#include "rsi_debugfs.h"
19#include "rsi_mgmt.h"
20#include "rsi_common.h"
21
22static const struct ieee80211_channel rsi_2ghz_channels[] = {
23 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
24 .hw_value = 1 }, /* Channel 1 */
25 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
26 .hw_value = 2 }, /* Channel 2 */
27 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
28 .hw_value = 3 }, /* Channel 3 */
29 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
30 .hw_value = 4 }, /* Channel 4 */
31 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
32 .hw_value = 5 }, /* Channel 5 */
33 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
34 .hw_value = 6 }, /* Channel 6 */
35 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
36 .hw_value = 7 }, /* Channel 7 */
37 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
38 .hw_value = 8 }, /* Channel 8 */
39 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
40 .hw_value = 9 }, /* Channel 9 */
41 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
42 .hw_value = 10 }, /* Channel 10 */
43 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
44 .hw_value = 11 }, /* Channel 11 */
45 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
46 .hw_value = 12 }, /* Channel 12 */
47 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
48 .hw_value = 13 }, /* Channel 13 */
49 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
50 .hw_value = 14 }, /* Channel 14 */
51};
52
53static const struct ieee80211_channel rsi_5ghz_channels[] = {
54 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
55 .hw_value = 36, }, /* Channel 36 */
56 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
57 .hw_value = 40, }, /* Channel 40 */
58 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
59 .hw_value = 44, }, /* Channel 44 */
60 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
61 .hw_value = 48, }, /* Channel 48 */
62 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
63 .hw_value = 52, }, /* Channel 52 */
64 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
65 .hw_value = 56, }, /* Channel 56 */
66 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
67 .hw_value = 60, }, /* Channel 60 */
68 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
69 .hw_value = 64, }, /* Channel 64 */
70 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
71 .hw_value = 100, }, /* Channel 100 */
72 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
73 .hw_value = 104, }, /* Channel 104 */
74 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
75 .hw_value = 108, }, /* Channel 108 */
76 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
77 .hw_value = 112, }, /* Channel 112 */
78 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
79 .hw_value = 116, }, /* Channel 116 */
80 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
81 .hw_value = 120, }, /* Channel 120 */
82 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
83 .hw_value = 124, }, /* Channel 124 */
84 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
85 .hw_value = 128, }, /* Channel 128 */
86 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
87 .hw_value = 132, }, /* Channel 132 */
88 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
89 .hw_value = 136, }, /* Channel 136 */
90 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
91 .hw_value = 140, }, /* Channel 140 */
92 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
93 .hw_value = 149, }, /* Channel 149 */
94 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
95 .hw_value = 153, }, /* Channel 153 */
96 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
97 .hw_value = 157, }, /* Channel 157 */
98 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
99 .hw_value = 161, }, /* Channel 161 */
100 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
101 .hw_value = 165, }, /* Channel 165 */
102};
103
/* Legacy bitrate table exposed to mac80211.  .bitrate is in units of
 * 100 kbps, hence the "* 5" applied to the STD_RATE_* values (which are
 * in 500 kbps units).  Entries 0-3 are the 11b rates, entries 4-11 the
 * 11a/g rates; rsi_register_rates_channels() registers &rsi_rates[4]
 * for the 5 GHz band to skip the 11b entries.
 */
struct ieee80211_rate rsi_rates[12] = {
	{ .bitrate = STD_RATE_01  * 5, .hw_value = RSI_RATE_1 },
	{ .bitrate = STD_RATE_02  * 5, .hw_value = RSI_RATE_2 },
	{ .bitrate = STD_RATE_5_5 * 5, .hw_value = RSI_RATE_5_5 },
	{ .bitrate = STD_RATE_11  * 5, .hw_value = RSI_RATE_11 },
	{ .bitrate = STD_RATE_06  * 5, .hw_value = RSI_RATE_6 },
	{ .bitrate = STD_RATE_09  * 5, .hw_value = RSI_RATE_9 },
	{ .bitrate = STD_RATE_12  * 5, .hw_value = RSI_RATE_12 },
	{ .bitrate = STD_RATE_18  * 5, .hw_value = RSI_RATE_18 },
	{ .bitrate = STD_RATE_24  * 5, .hw_value = RSI_RATE_24 },
	{ .bitrate = STD_RATE_36  * 5, .hw_value = RSI_RATE_36 },
	{ .bitrate = STD_RATE_48  * 5, .hw_value = RSI_RATE_48 },
	{ .bitrate = STD_RATE_54  * 5, .hw_value = RSI_RATE_54 },
};
118
/* Firmware rate codes for HT MCS0-MCS7, indexed by MCS number.  Used by
 * rsi_set_min_rate(), which treats bits 12+ of the rate bitmap as MCS
 * rates.
 */
const u16 rsi_mcsrates[8] = {
	RSI_RATE_MCS0, RSI_RATE_MCS1, RSI_RATE_MCS2, RSI_RATE_MCS3,
	RSI_RATE_MCS4, RSI_RATE_MCS5, RSI_RATE_MCS6, RSI_RATE_MCS7
};
123
124/**
125 * rsi_is_cipher_wep() - This function determines if the cipher is WEP or not.
126 * @common: Pointer to the driver private structure.
127 *
128 * Return: If cipher type is WEP, a value of 1 is returned, else 0.
129 */
130
131bool rsi_is_cipher_wep(struct rsi_common *common)
132{
133 if (((common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP104) ||
134 (common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP40)) &&
135 (!common->secinfo.ptk_cipher))
136 return true;
137 else
138 return false;
139}
140
141/**
142 * rsi_register_rates_channels() - This function registers channels and rates.
143 * @adapter: Pointer to the adapter structure.
144 * @band: Operating band to be set.
145 *
146 * Return: None.
147 */
148static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
149{
150 struct ieee80211_supported_band *sbands = &adapter->sbands[band];
151 void *channels = NULL;
152
153 if (band == IEEE80211_BAND_2GHZ) {
154 channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
155 memcpy(channels,
156 rsi_2ghz_channels,
157 sizeof(rsi_2ghz_channels));
158 sbands->band = IEEE80211_BAND_2GHZ;
159 sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
160 sbands->bitrates = rsi_rates;
161 sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
162 } else {
163 channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
164 memcpy(channels,
165 rsi_5ghz_channels,
166 sizeof(rsi_5ghz_channels));
167 sbands->band = IEEE80211_BAND_5GHZ;
168 sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
169 sbands->bitrates = &rsi_rates[4];
170 sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
171 }
172
173 sbands->channels = channels;
174
175 memset(&sbands->ht_cap, 0, sizeof(struct ieee80211_sta_ht_cap));
176 sbands->ht_cap.ht_supported = true;
177 sbands->ht_cap.cap = (IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
178 IEEE80211_HT_CAP_SGI_20 |
179 IEEE80211_HT_CAP_SGI_40);
180 sbands->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
181 sbands->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
182 sbands->ht_cap.mcs.rx_mask[0] = 0xff;
183 sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
184 /* sbands->ht_cap.mcs.rx_highest = 0x82; */
185}
186
/**
 * rsi_mac80211_detach() - This function is used to de-initialize the
 *			   Mac80211 stack.
 * @adapter: Pointer to the adapter structure.
 *
 * Return: None.
 */
void rsi_mac80211_detach(struct rsi_hw *adapter)
{
	struct ieee80211_hw *hw = adapter->hw;

	if (hw) {
		ieee80211_stop_queues(hw);
		ieee80211_unregister_hw(hw);
		ieee80211_free_hw(hw);
	}

	rsi_remove_dbgfs(adapter);
}
EXPORT_SYMBOL_GPL(rsi_mac80211_detach);
207
208/**
209 * rsi_indicate_tx_status() - This function indicates the transmit status.
210 * @adapter: Pointer to the adapter structure.
211 * @skb: Pointer to the socket buffer structure.
212 * @status: Status
213 *
214 * Return: None.
215 */
216void rsi_indicate_tx_status(struct rsi_hw *adapter,
217 struct sk_buff *skb,
218 int status)
219{
220 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
221
222 memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
223
224 if (!status)
225 info->flags |= IEEE80211_TX_STAT_ACK;
226
227 ieee80211_tx_status_irqsafe(adapter->hw, skb);
228}
229
230/**
231 * rsi_mac80211_tx() - This is the handler that 802.11 module calls for each
232 * transmitted frame.SKB contains the buffer starting
233 * from the IEEE 802.11 header.
234 * @hw: Pointer to the ieee80211_hw structure.
235 * @control: Pointer to the ieee80211_tx_control structure
236 * @skb: Pointer to the socket buffer structure.
237 *
238 * Return: None
239 */
240static void rsi_mac80211_tx(struct ieee80211_hw *hw,
241 struct ieee80211_tx_control *control,
242 struct sk_buff *skb)
243{
244 struct rsi_hw *adapter = hw->priv;
245 struct rsi_common *common = adapter->priv;
246
247 rsi_core_xmit(common, skb);
248}
249
250/**
251 * rsi_mac80211_start() - This is first handler that 802.11 module calls, since
252 * the driver init is complete by then, just
253 * returns success.
254 * @hw: Pointer to the ieee80211_hw structure.
255 *
256 * Return: 0 as success.
257 */
258static int rsi_mac80211_start(struct ieee80211_hw *hw)
259{
260 struct rsi_hw *adapter = hw->priv;
261 struct rsi_common *common = adapter->priv;
262
263 mutex_lock(&common->mutex);
264 common->iface_down = false;
265 mutex_unlock(&common->mutex);
266
267 return 0;
268}
269
270/**
271 * rsi_mac80211_stop() - This is the last handler that 802.11 module calls.
272 * @hw: Pointer to the ieee80211_hw structure.
273 *
274 * Return: None.
275 */
276static void rsi_mac80211_stop(struct ieee80211_hw *hw)
277{
278 struct rsi_hw *adapter = hw->priv;
279 struct rsi_common *common = adapter->priv;
280
281 mutex_lock(&common->mutex);
282 common->iface_down = true;
283 mutex_unlock(&common->mutex);
284}
285
286/**
287 * rsi_mac80211_add_interface() - This function is called when a netdevice
288 * attached to the hardware is enabled.
289 * @hw: Pointer to the ieee80211_hw structure.
290 * @vif: Pointer to the ieee80211_vif structure.
291 *
292 * Return: ret: 0 on success, negative error code on failure.
293 */
294static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
295 struct ieee80211_vif *vif)
296{
297 struct rsi_hw *adapter = hw->priv;
298 struct rsi_common *common = adapter->priv;
299 int ret = -EOPNOTSUPP;
300
301 mutex_lock(&common->mutex);
302 switch (vif->type) {
303 case NL80211_IFTYPE_STATION:
304 if (!adapter->sc_nvifs) {
305 ++adapter->sc_nvifs;
306 adapter->vifs[0] = vif;
307 ret = rsi_set_vap_capabilities(common, STA_OPMODE);
308 }
309 break;
310 default:
311 rsi_dbg(ERR_ZONE,
312 "%s: Interface type %d not supported\n", __func__,
313 vif->type);
314 }
315 mutex_unlock(&common->mutex);
316
317 return ret;
318}
319
320/**
321 * rsi_mac80211_remove_interface() - This function notifies driver that an
322 * interface is going down.
323 * @hw: Pointer to the ieee80211_hw structure.
324 * @vif: Pointer to the ieee80211_vif structure.
325 *
326 * Return: None.
327 */
328static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
329 struct ieee80211_vif *vif)
330{
331 struct rsi_hw *adapter = hw->priv;
332 struct rsi_common *common = adapter->priv;
333
334 mutex_lock(&common->mutex);
335 if (vif->type == NL80211_IFTYPE_STATION)
336 adapter->sc_nvifs--;
337
338 if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif)))
339 adapter->vifs[0] = NULL;
340 mutex_unlock(&common->mutex);
341}
342
343/**
344 * rsi_mac80211_config() - This function is a handler for configuration
345 * requests. The stack calls this function to
346 * change hardware configuration, e.g., channel.
347 * @hw: Pointer to the ieee80211_hw structure.
348 * @changed: Changed flags set.
349 *
350 * Return: 0 on success, negative error code on failure.
351 */
352static int rsi_mac80211_config(struct ieee80211_hw *hw,
353 u32 changed)
354{
355 struct rsi_hw *adapter = hw->priv;
356 struct rsi_common *common = adapter->priv;
357 int status = -EOPNOTSUPP;
358
359 mutex_lock(&common->mutex);
360 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
361 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
362 u16 channel = curchan->hw_value;
363
364 rsi_dbg(INFO_ZONE,
365 "%s: Set channel: %d MHz type: %d channel_no %d\n",
366 __func__, curchan->center_freq,
367 curchan->flags, channel);
368 common->band = curchan->band;
369 status = rsi_set_channel(adapter->priv, channel);
370 }
371 mutex_unlock(&common->mutex);
372
373 return status;
374}
375
376/**
377 * rsi_get_connected_channel() - This function is used to get the current
378 * connected channel number.
379 * @adapter: Pointer to the adapter structure.
380 *
381 * Return: Current connected AP's channel number is returned.
382 */
383u16 rsi_get_connected_channel(struct rsi_hw *adapter)
384{
385 struct ieee80211_vif *vif = adapter->vifs[0];
386 if (vif) {
387 struct ieee80211_bss_conf *bss = &vif->bss_conf;
388 struct ieee80211_channel *channel = bss->chandef.chan;
389 return channel->hw_value;
390 }
391
392 return 0;
393}
394
395/**
396 * rsi_mac80211_bss_info_changed() - This function is a handler for config
397 * requests related to BSS parameters that
398 * may vary during BSS's lifespan.
399 * @hw: Pointer to the ieee80211_hw structure.
400 * @vif: Pointer to the ieee80211_vif structure.
401 * @bss_conf: Pointer to the ieee80211_bss_conf structure.
402 * @changed: Changed flags set.
403 *
404 * Return: None.
405 */
406static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
407 struct ieee80211_vif *vif,
408 struct ieee80211_bss_conf *bss_conf,
409 u32 changed)
410{
411 struct rsi_hw *adapter = hw->priv;
412 struct rsi_common *common = adapter->priv;
413
414 mutex_lock(&common->mutex);
415 if (changed & BSS_CHANGED_ASSOC) {
416 rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
417 __func__, bss_conf->assoc);
418 rsi_inform_bss_status(common,
419 bss_conf->assoc,
420 bss_conf->bssid,
421 bss_conf->qos,
422 bss_conf->aid);
423 }
424 mutex_unlock(&common->mutex);
425}
426
/**
 * rsi_mac80211_conf_filter() - This function configures the device's RX filter.
 * @hw: Pointer to the ieee80211_hw structure.
 * @changed_flags: Changed flags set.
 * @total_flags: Total initial flags set.
 * @multicast: Multicast.
 *
 * Return: None.
 */
static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
				     u32 changed_flags,
				     u32 *total_flags,
				     u64 multicast)
{
	/* No hardware filtering yet: just clamp the requested flags to
	 * the set the driver claims to support.
	 */
	*total_flags &= RSI_SUPP_FILTERS;
}
444
445/**
446 * rsi_mac80211_conf_tx() - This function configures TX queue parameters
447 * (EDCF (aifs, cw_min, cw_max), bursting)
448 * for a hardware TX queue.
449 * @hw: Pointer to the ieee80211_hw structure
450 * @vif: Pointer to the ieee80211_vif structure.
451 * @queue: Queue number.
452 * @params: Pointer to ieee80211_tx_queue_params structure.
453 *
454 * Return: 0 on success, negative error code on failure.
455 */
456static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
457 struct ieee80211_vif *vif, u16 queue,
458 const struct ieee80211_tx_queue_params *params)
459{
460 struct rsi_hw *adapter = hw->priv;
461 struct rsi_common *common = adapter->priv;
462 u8 idx = 0;
463
464 if (queue >= IEEE80211_NUM_ACS)
465 return 0;
466
467 rsi_dbg(INFO_ZONE,
468 "%s: Conf queue %d, aifs: %d, cwmin: %d cwmax: %d, txop: %d\n",
469 __func__, queue, params->aifs,
470 params->cw_min, params->cw_max, params->txop);
471
472 mutex_lock(&common->mutex);
473 /* Map into the way the f/w expects */
474 switch (queue) {
475 case IEEE80211_AC_VO:
476 idx = VO_Q;
477 break;
478 case IEEE80211_AC_VI:
479 idx = VI_Q;
480 break;
481 case IEEE80211_AC_BE:
482 idx = BE_Q;
483 break;
484 case IEEE80211_AC_BK:
485 idx = BK_Q;
486 break;
487 default:
488 idx = BE_Q;
489 break;
490 }
491
492 memcpy(&common->edca_params[idx],
493 params,
494 sizeof(struct ieee80211_tx_queue_params));
495 mutex_unlock(&common->mutex);
496
497 return 0;
498}
499
500/**
501 * rsi_hal_key_config() - This function loads the keys into the firmware.
502 * @hw: Pointer to the ieee80211_hw structure.
503 * @vif: Pointer to the ieee80211_vif structure.
504 * @key: Pointer to the ieee80211_key_conf structure.
505 *
506 * Return: status: 0 on success, -1 on failure.
507 */
508static int rsi_hal_key_config(struct ieee80211_hw *hw,
509 struct ieee80211_vif *vif,
510 struct ieee80211_key_conf *key)
511{
512 struct rsi_hw *adapter = hw->priv;
513 int status;
514 u8 key_type;
515
516 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
517 key_type = RSI_PAIRWISE_KEY;
518 else
519 key_type = RSI_GROUP_KEY;
520
521 rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n",
522 __func__, key->cipher, key_type, key->keylen);
523
524 if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) ||
525 (key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
526 status = rsi_hal_load_key(adapter->priv,
527 key->key,
528 key->keylen,
529 RSI_PAIRWISE_KEY,
530 key->keyidx,
531 key->cipher);
532 if (status)
533 return status;
534 }
535 return rsi_hal_load_key(adapter->priv,
536 key->key,
537 key->keylen,
538 key_type,
539 key->keyidx,
540 key->cipher);
541}
542
543/**
544 * rsi_mac80211_set_key() - This function sets type of key to be loaded.
545 * @hw: Pointer to the ieee80211_hw structure.
546 * @cmd: enum set_key_cmd.
547 * @vif: Pointer to the ieee80211_vif structure.
548 * @sta: Pointer to the ieee80211_sta structure.
549 * @key: Pointer to the ieee80211_key_conf structure.
550 *
551 * Return: status: 0 on success, negative error code on failure.
552 */
553static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
554 enum set_key_cmd cmd,
555 struct ieee80211_vif *vif,
556 struct ieee80211_sta *sta,
557 struct ieee80211_key_conf *key)
558{
559 struct rsi_hw *adapter = hw->priv;
560 struct rsi_common *common = adapter->priv;
561 struct security_info *secinfo = &common->secinfo;
562 int status;
563
564 mutex_lock(&common->mutex);
565 switch (cmd) {
566 case SET_KEY:
567 secinfo->security_enable = true;
568 status = rsi_hal_key_config(hw, vif, key);
569 if (status) {
570 mutex_unlock(&common->mutex);
571 return status;
572 }
573
574 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
575 secinfo->ptk_cipher = key->cipher;
576 else
577 secinfo->gtk_cipher = key->cipher;
578
579 key->hw_key_idx = key->keyidx;
580 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
581
582 rsi_dbg(ERR_ZONE, "%s: RSI set_key\n", __func__);
583 break;
584
585 case DISABLE_KEY:
586 secinfo->security_enable = false;
587 rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
588 memset(key, 0, sizeof(struct ieee80211_key_conf));
589 status = rsi_hal_key_config(hw, vif, key);
590 break;
591
592 default:
593 status = -EOPNOTSUPP;
594 break;
595 }
596
597 mutex_unlock(&common->mutex);
598 return status;
599}
600
601/**
602 * rsi_mac80211_ampdu_action() - This function selects the AMPDU action for
603 * the corresponding mlme_action flag and
604 * informs the f/w regarding this.
605 * @hw: Pointer to the ieee80211_hw structure.
606 * @vif: Pointer to the ieee80211_vif structure.
607 * @action: ieee80211_ampdu_mlme_action enum.
608 * @sta: Pointer to the ieee80211_sta structure.
609 * @tid: Traffic identifier.
610 * @ssn: Pointer to ssn value.
611 * @buf_size: Buffer size (for kernel version > 2.6.38).
612 *
613 * Return: status: 0 on success, negative error code on failure.
614 */
615static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
616 struct ieee80211_vif *vif,
617 enum ieee80211_ampdu_mlme_action action,
618 struct ieee80211_sta *sta,
619 unsigned short tid,
620 unsigned short *ssn,
621 unsigned char buf_size)
622{
623 int status = -EOPNOTSUPP;
624 struct rsi_hw *adapter = hw->priv;
625 struct rsi_common *common = adapter->priv;
626 u16 seq_no = 0;
627 u8 ii = 0;
628
629 for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
630 if (vif == adapter->vifs[ii])
631 break;
632 }
633
634 mutex_lock(&common->mutex);
635 rsi_dbg(INFO_ZONE, "%s: AMPDU action %d called\n", __func__, action);
636 if (ssn != NULL)
637 seq_no = *ssn;
638
639 switch (action) {
640 case IEEE80211_AMPDU_RX_START:
641 status = rsi_send_aggregation_params_frame(common,
642 tid,
643 seq_no,
644 buf_size,
645 STA_RX_ADDBA_DONE);
646 break;
647
648 case IEEE80211_AMPDU_RX_STOP:
649 status = rsi_send_aggregation_params_frame(common,
650 tid,
651 0,
652 buf_size,
653 STA_RX_DELBA);
654 break;
655
656 case IEEE80211_AMPDU_TX_START:
657 common->vif_info[ii].seq_start = seq_no;
658 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
659 break;
660
661 case IEEE80211_AMPDU_TX_STOP_CONT:
662 case IEEE80211_AMPDU_TX_STOP_FLUSH:
663 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
664 status = rsi_send_aggregation_params_frame(common,
665 tid,
666 seq_no,
667 buf_size,
668 STA_TX_DELBA);
669 if (!status)
670 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
671 break;
672
673 case IEEE80211_AMPDU_TX_OPERATIONAL:
674 status = rsi_send_aggregation_params_frame(common,
675 tid,
676 common->vif_info[ii]
677 .seq_start,
678 buf_size,
679 STA_TX_ADDBA_DONE);
680 break;
681
682 default:
683 rsi_dbg(ERR_ZONE, "%s: Uknown AMPDU action\n", __func__);
684 break;
685 }
686
687 mutex_unlock(&common->mutex);
688 return status;
689}
690
691/**
692 * rsi_mac80211_set_rts_threshold() - This function sets rts threshold value.
693 * @hw: Pointer to the ieee80211_hw structure.
694 * @value: Rts threshold value.
695 *
696 * Return: 0 on success.
697 */
698static int rsi_mac80211_set_rts_threshold(struct ieee80211_hw *hw,
699 u32 value)
700{
701 struct rsi_hw *adapter = hw->priv;
702 struct rsi_common *common = adapter->priv;
703
704 mutex_lock(&common->mutex);
705 common->rts_threshold = value;
706 mutex_unlock(&common->mutex);
707
708 return 0;
709}
710
711/**
712 * rsi_mac80211_set_rate_mask() - This function sets bitrate_mask to be used.
713 * @hw: Pointer to the ieee80211_hw structure
714 * @vif: Pointer to the ieee80211_vif structure.
715 * @mask: Pointer to the cfg80211_bitrate_mask structure.
716 *
717 * Return: 0 on success.
718 */
719static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
720 struct ieee80211_vif *vif,
721 const struct cfg80211_bitrate_mask *mask)
722{
723 struct rsi_hw *adapter = hw->priv;
724 struct rsi_common *common = adapter->priv;
725
726 mutex_lock(&common->mutex);
727
728 common->fixedrate_mask[IEEE80211_BAND_2GHZ] = 0;
729
730 if (mask->control[IEEE80211_BAND_2GHZ].legacy == 0xfff) {
731 common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
732 (mask->control[IEEE80211_BAND_2GHZ].ht_mcs[0] << 12);
733 } else {
734 common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
735 mask->control[IEEE80211_BAND_2GHZ].legacy;
736 }
737 mutex_unlock(&common->mutex);
738
739 return 0;
740}
741
/**
 * rsi_fill_rx_status() - This function fills rx status in
 *			  ieee80211_rx_status structure.
 * @hw: Pointer to the ieee80211_hw structure.
 * @skb: Pointer to the socket buffer structure.
 * @common: Pointer to the driver private structure.
 * @rxs: Pointer to the ieee80211_rx_status structure.
 *
 * Return: None.
 */
static void rsi_fill_rx_status(struct ieee80211_hw *hw,
			       struct sk_buff *skb,
			       struct rsi_common *common,
			       struct ieee80211_rx_status *rxs)
{
	/* info and rx_params both alias skb->cb; rssi and channel are
	 * copied to locals here because the memset() below wipes the cb.
	 * Do not reorder these reads past the memset.
	 */
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct skb_info *rx_params = (struct skb_info *)info->driver_data;
	struct ieee80211_hdr *hdr;
	char rssi = rx_params->rssi;
	u8 hdrlen = 0;
	u8 channel = rx_params->channel;
	s32 freq;

	hdr = ((struct ieee80211_hdr *)(skb->data));
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	memset(info, 0, sizeof(struct ieee80211_tx_info));

	/* rssi appears to be reported as a positive magnitude and is
	 * negated for mac80211's dBm convention -- note `char` signedness
	 * is implementation-defined; TODO confirm firmware range.
	 */
	rxs->signal = -(rssi);

	/* Channels 1-14 are 2.4 GHz; everything above is 5 GHz */
	if (channel <= 14)
		rxs->band = IEEE80211_BAND_2GHZ;
	else
		rxs->band = IEEE80211_BAND_5GHZ;

	freq = ieee80211_channel_to_frequency(channel, rxs->band);

	if (freq)
		rxs->freq = freq;

	if (ieee80211_has_protected(hdr->frame_control)) {
		if (rsi_is_cipher_wep(common)) {
			/* Strip the 4-byte WEP IV following the header */
			memmove(skb->data + 4, skb->data, hdrlen);
			skb_pull(skb, 4);
		} else {
			/* Strip the 8-byte IV/ext-IV for non-WEP ciphers */
			memmove(skb->data + 8, skb->data, hdrlen);
			skb_pull(skb, 8);
			rxs->flag |= RX_FLAG_MMIC_STRIPPED;
		}
		rxs->flag |= RX_FLAG_DECRYPTED;
		rxs->flag |= RX_FLAG_IV_STRIPPED;
	}
}
795
796/**
797 * rsi_indicate_pkt_to_os() - This function sends recieved packet to mac80211.
798 * @common: Pointer to the driver private structure.
799 * @skb: Pointer to the socket buffer structure.
800 *
801 * Return: None.
802 */
803void rsi_indicate_pkt_to_os(struct rsi_common *common,
804 struct sk_buff *skb)
805{
806 struct rsi_hw *adapter = common->priv;
807 struct ieee80211_hw *hw = adapter->hw;
808 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
809
810 if ((common->iface_down) || (!adapter->sc_nvifs)) {
811 dev_kfree_skb(skb);
812 return;
813 }
814
815 /* filling in the ieee80211_rx_status flags */
816 rsi_fill_rx_status(hw, skb, common, rx_status);
817
818 ieee80211_rx_irqsafe(hw, skb);
819}
820
821static void rsi_set_min_rate(struct ieee80211_hw *hw,
822 struct ieee80211_sta *sta,
823 struct rsi_common *common)
824{
825 u8 band = hw->conf.chandef.chan->band;
826 u8 ii;
827 u32 rate_bitmap;
828 bool matched = false;
829
830 common->bitrate_mask[band] = sta->supp_rates[band];
831
832 rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
833
834 if (rate_bitmap & 0xfff) {
835 /* Find out the min rate */
836 for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
837 if (rate_bitmap & BIT(ii)) {
838 common->min_rate = rsi_rates[ii].hw_value;
839 matched = true;
840 break;
841 }
842 }
843 }
844
845 common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
846
847 if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
848 for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
849 if ((rate_bitmap >> 12) & BIT(ii)) {
850 common->min_rate = rsi_mcsrates[ii];
851 matched = true;
852 break;
853 }
854 }
855 }
856
857 if (!matched)
858 common->min_rate = 0xffff;
859}
860
861/**
862 * rsi_mac80211_sta_add() - This function notifies driver about a peer getting
863 * connected.
864 * @hw: pointer to the ieee80211_hw structure.
865 * @vif: Pointer to the ieee80211_vif structure.
866 * @sta: Pointer to the ieee80211_sta structure.
867 *
868 * Return: 0 on success, -1 on failure.
869 */
870static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
871 struct ieee80211_vif *vif,
872 struct ieee80211_sta *sta)
873{
874 struct rsi_hw *adapter = hw->priv;
875 struct rsi_common *common = adapter->priv;
876
877 mutex_lock(&common->mutex);
878
879 rsi_set_min_rate(hw, sta, common);
880
881 if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
882 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) {
883 common->vif_info[0].sgi = true;
884 }
885
886 if (sta->ht_cap.ht_supported)
887 ieee80211_start_tx_ba_session(sta, 0, 0);
888
889 mutex_unlock(&common->mutex);
890
891 return 0;
892}
893
/**
 * rsi_mac80211_sta_remove() - This function notifies driver about a peer
 *			       getting disconnected.
 * @hw: Pointer to the ieee80211_hw structure.
 * @vif: Pointer to the ieee80211_vif structure.
 * @sta: Pointer to the ieee80211_sta structure.
 *
 * Return: 0 on success.
 */
static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta)
{
	struct rsi_hw *adapter = hw->priv;
	struct rsi_common *common = adapter->priv;

	mutex_lock(&common->mutex);
	/* Resetting all the fields to default values: rate masks,
	 * per-vif HT/SGI/aggregation state and cached cipher suites all
	 * belong to the single supported peer, so drop them wholesale.
	 */
	common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
	common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
	common->min_rate = 0xffff;	/* 0xffff means "auto rate" */
	common->vif_info[0].is_ht = false;
	common->vif_info[0].sgi = false;
	common->vif_info[0].seq_start = 0;
	common->secinfo.ptk_cipher = 0;
	common->secinfo.gtk_cipher = 0;
	mutex_unlock(&common->mutex);

	return 0;
}
924
925static struct ieee80211_ops mac80211_ops = {
926 .tx = rsi_mac80211_tx,
927 .start = rsi_mac80211_start,
928 .stop = rsi_mac80211_stop,
929 .add_interface = rsi_mac80211_add_interface,
930 .remove_interface = rsi_mac80211_remove_interface,
931 .config = rsi_mac80211_config,
932 .bss_info_changed = rsi_mac80211_bss_info_changed,
933 .conf_tx = rsi_mac80211_conf_tx,
934 .configure_filter = rsi_mac80211_conf_filter,
935 .set_key = rsi_mac80211_set_key,
936 .set_rts_threshold = rsi_mac80211_set_rts_threshold,
937 .set_bitrate_mask = rsi_mac80211_set_rate_mask,
938 .ampdu_action = rsi_mac80211_ampdu_action,
939 .sta_add = rsi_mac80211_sta_add,
940 .sta_remove = rsi_mac80211_sta_remove,
941};
942
/**
 * rsi_mac80211_attach() - This function is used to initialize Mac80211 stack.
 * @common: Pointer to the driver private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int rsi_mac80211_attach(struct rsi_common *common)
{
	int status = 0;
	struct ieee80211_hw *hw = NULL;
	struct wiphy *wiphy = NULL;
	struct rsi_hw *adapter = common->priv;
	/* Only the low bits of the last address byte may vary */
	u8 addr_mask[ETH_ALEN] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x3};

	rsi_dbg(INIT_ZONE, "%s: Performing mac80211 attach\n", __func__);

	hw = ieee80211_alloc_hw(sizeof(struct rsi_hw), &mac80211_ops);
	if (!hw) {
		rsi_dbg(ERR_ZONE, "%s: ieee80211 hw alloc failed\n", __func__);
		return -ENOMEM;
	}

	wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, adapter->device);

	hw->priv = adapter;
	adapter->hw = hw;

	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_HAS_RATE_CONTROL |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    0;

	hw->queues = MAX_HW_QUEUES;
	hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;

	hw->max_rates = 1;
	hw->max_rate_tries = MAX_RETRIES;

	hw->max_tx_aggregation_subframes = 6;
	/* Only the 2.4 GHz band is registered with the stack below */
	rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
	hw->rate_control_algorithm = "AARF";

	SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
	ether_addr_copy(hw->wiphy->addr_mask, addr_mask);

	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
	wiphy->retry_short = RETRY_SHORT;
	wiphy->retry_long = RETRY_LONG;
	wiphy->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	wiphy->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	wiphy->flags = 0;

	wiphy->available_antennas_rx = 1;
	wiphy->available_antennas_tx = 1;
	wiphy->bands[IEEE80211_BAND_2GHZ] =
		&adapter->sbands[IEEE80211_BAND_2GHZ];

	/* NOTE(review): on register failure the allocated hw is not freed
	 * here and adapter->hw still points at it -- presumably the caller
	 * unwinds via rsi_mac80211_detach(), but that path calls
	 * ieee80211_unregister_hw() on a never-registered hw. Verify the
	 * caller's error path.
	 */
	status = ieee80211_register_hw(hw);
	if (status)
		return status;

	return rsi_init_dbgfs(adapter);
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
new file mode 100644
index 000000000000..8810862ae826
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -0,0 +1,295 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h>
20#include <linux/firmware.h>
21#include "rsi_mgmt.h"
22#include "rsi_common.h"
23
24u32 rsi_zone_enabled = /* INFO_ZONE |
25 INIT_ZONE |
26 MGMT_TX_ZONE |
27 MGMT_RX_ZONE |
28 DATA_TX_ZONE |
29 DATA_RX_ZONE |
30 FSM_ZONE |
31 ISR_ZONE | */
32 ERR_ZONE |
33 0;
34EXPORT_SYMBOL_GPL(rsi_zone_enabled);
35
36/**
37 * rsi_dbg() - This function outputs informational messages.
38 * @zone: Zone of interest for output message.
39 * @fmt: printf-style format for output message.
40 *
41 * Return: none
42 */
43void rsi_dbg(u32 zone, const char *fmt, ...)
44{
45 struct va_format vaf;
46 va_list args;
47
48 va_start(args, fmt);
49
50 vaf.fmt = fmt;
51 vaf.va = &args;
52
53 if (zone & rsi_zone_enabled)
54 pr_info("%pV", &vaf);
55 va_end(args);
56}
57EXPORT_SYMBOL_GPL(rsi_dbg);
58
59/**
60 * rsi_prepare_skb() - This function prepares the skb.
61 * @common: Pointer to the driver private structure.
62 * @buffer: Pointer to the packet data.
63 * @pkt_len: Length of the packet.
64 * @extended_desc: Extended descriptor.
65 *
66 * Return: Successfully skb.
67 */
68static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
69 u8 *buffer,
70 u32 pkt_len,
71 u8 extended_desc)
72{
73 struct ieee80211_tx_info *info;
74 struct skb_info *rx_params;
75 struct sk_buff *skb = NULL;
76 u8 payload_offset;
77
78 if (WARN(!pkt_len, "%s: Dummy pkt received", __func__))
79 return NULL;
80
81 if (pkt_len > (RSI_RCV_BUFFER_LEN * 4)) {
82 rsi_dbg(ERR_ZONE, "%s: Pkt size > max rx buf size %d\n",
83 __func__, pkt_len);
84 pkt_len = RSI_RCV_BUFFER_LEN * 4;
85 }
86
87 pkt_len -= extended_desc;
88 skb = dev_alloc_skb(pkt_len + FRAME_DESC_SZ);
89 if (skb == NULL)
90 return NULL;
91
92 payload_offset = (extended_desc + FRAME_DESC_SZ);
93 skb_put(skb, pkt_len);
94 memcpy((skb->data), (buffer + payload_offset), skb->len);
95
96 info = IEEE80211_SKB_CB(skb);
97 rx_params = (struct skb_info *)info->driver_data;
98 rx_params->rssi = rsi_get_rssi(buffer);
99 rx_params->channel = rsi_get_connected_channel(common->priv);
100
101 return skb;
102}
103
/**
 * rsi_read_pkt() - This function reads frames from the card.
 * @common: Pointer to the driver private structure.
 * @rcv_pkt_len: Received pkt length. In case of USB it is 0.
 *
 * Return: 0 on success, -1 on failure.
 */
int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len)
{
	u8 *frame_desc = NULL, extended_desc = 0;
	u32 index, length = 0, queueno = 0;
	u16 actual_length = 0, offset;
	struct sk_buff *skb = NULL;

	index = 0;
	do {
		/* Walk the aggregated receive buffer one frame at a time. */
		frame_desc = &common->rx_data_pkt[index];
		/* NOTE(review): raw u16 loads assume a little-endian CPU
		 * and 2-byte alignment of each descriptor — confirm for all
		 * supported hosts. */
		actual_length = *(u16 *)&frame_desc[0];
		offset = *(u16 *)&frame_desc[2];

		queueno = rsi_get_queueno(frame_desc, offset);
		length = rsi_get_length(frame_desc, offset);
		extended_desc = rsi_get_extended_desc(frame_desc, offset);

		switch (queueno) {
		case RSI_WIFI_DATA_Q:
			/* Data frame: wrap in an skb and hand to mac80211. */
			skb = rsi_prepare_skb(common,
					      (frame_desc + offset),
					      length,
					      extended_desc);
			if (skb == NULL)
				goto fail;

			rsi_indicate_pkt_to_os(common, skb);
			break;

		case RSI_WIFI_MGMT_Q:
			rsi_mgmt_pkt_recv(common, (frame_desc + offset));
			break;

		default:
			rsi_dbg(ERR_ZONE, "%s: pkt from invalid queue: %d\n",
				__func__, queueno);
			goto fail;
		}

		/* NOTE(review): if the card reported actual_length == 0 this
		 * loop would spin on the same descriptor forever — TODO
		 * confirm the firmware guarantees a non-zero length. */
		index += actual_length;
		rcv_pkt_len -= actual_length;
	} while (rcv_pkt_len > 0);

	return 0;
fail:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(rsi_read_pkt);
159
160/**
161 * rsi_tx_scheduler_thread() - This function is a kernel thread to send the
162 * packets to the device.
163 * @common: Pointer to the driver private structure.
164 *
165 * Return: None.
166 */
167static void rsi_tx_scheduler_thread(struct rsi_common *common)
168{
169 struct rsi_hw *adapter = common->priv;
170 u32 timeout = EVENT_WAIT_FOREVER;
171
172 do {
173 if (adapter->determine_event_timeout)
174 timeout = adapter->determine_event_timeout(adapter);
175 rsi_wait_event(&common->tx_thread.event, timeout);
176 rsi_reset_event(&common->tx_thread.event);
177
178 if (common->init_done)
179 rsi_core_qos_processor(common);
180 } while (atomic_read(&common->tx_thread.thread_done) == 0);
181 complete_and_exit(&common->tx_thread.completion, 0);
182}
183
184/**
185 * rsi_91x_init() - This function initializes os interface operations.
186 * @void: Void.
187 *
188 * Return: Pointer to the adapter structure on success, NULL on failure .
189 */
190struct rsi_hw *rsi_91x_init(void)
191{
192 struct rsi_hw *adapter = NULL;
193 struct rsi_common *common = NULL;
194 u8 ii = 0;
195
196 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
197 if (!adapter)
198 return NULL;
199
200 adapter->priv = kzalloc(sizeof(*common), GFP_KERNEL);
201 if (adapter->priv == NULL) {
202 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of memory\n",
203 __func__);
204 kfree(adapter);
205 return NULL;
206 } else {
207 common = adapter->priv;
208 common->priv = adapter;
209 }
210
211 for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
212 skb_queue_head_init(&common->tx_queue[ii]);
213
214 rsi_init_event(&common->tx_thread.event);
215 mutex_init(&common->mutex);
216 mutex_init(&common->tx_rxlock);
217
218 if (rsi_create_kthread(common,
219 &common->tx_thread,
220 rsi_tx_scheduler_thread,
221 "Tx-Thread")) {
222 rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
223 goto err;
224 }
225
226 common->init_done = true;
227 return adapter;
228
229err:
230 kfree(common);
231 kfree(adapter);
232 return NULL;
233}
234EXPORT_SYMBOL_GPL(rsi_91x_init);
235
/**
 * rsi_91x_deinit() - This function de-initializes os intf operations.
 * @adapter: Pointer to the adapter structure.
 *
 * Return: None.
 */
void rsi_91x_deinit(struct rsi_hw *adapter)
{
	struct rsi_common *common = adapter->priv;
	u8 ii;

	rsi_dbg(INFO_ZONE, "%s: Performing deinit os ops\n", __func__);

	/* Stop the TX scheduler thread before touching its queues. */
	rsi_kill_thread(&common->tx_thread);

	/* Drop anything still queued for transmission. */
	for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
		skb_queue_purge(&common->tx_queue[ii]);

	common->init_done = false;

	/* Teardown order matters: common first, then the bus-private
	 * structure, then the adapter that owns both pointers. */
	kfree(common);
	kfree(adapter->rsi_dev);
	kfree(adapter);
}
EXPORT_SYMBOL_GPL(rsi_91x_deinit);
261
/**
 * rsi_91x_hal_module_init() - This function is invoked when the module is
 * loaded into the kernel.
 * It registers the client driver.
 * @void: Void.
 *
 * Return: 0 on success, -1 on failure.
 */
static int rsi_91x_hal_module_init(void)
{
	/* No registration happens here; this core module only provides the
	 * routines exported above for the bus-specific modules to use. */
	rsi_dbg(INIT_ZONE, "%s: Module init called\n", __func__);
	return 0;
}
275
/**
 * rsi_91x_hal_module_exit() - This function is called at the time of
 * removing/unloading the module.
 * It unregisters the client driver.
 * @void: Void.
 *
 * Return: None.
 */
static void rsi_91x_hal_module_exit(void)
{
	/* Nothing to undo — module init allocated no resources. */
	rsi_dbg(INIT_ZONE, "%s: Module exit called\n", __func__);
}
288
289module_init(rsi_91x_hal_module_init);
290module_exit(rsi_91x_hal_module_exit);
291MODULE_AUTHOR("Redpine Signals Inc");
292MODULE_DESCRIPTION("Station driver for RSI 91x devices");
293MODULE_SUPPORTED_DEVICE("RSI-91x");
294MODULE_VERSION("0.1");
295MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
new file mode 100644
index 000000000000..2361a6849ad7
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -0,0 +1,1304 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/etherdevice.h>
18#include "rsi_mgmt.h"
19#include "rsi_common.h"
20
/* Default device bootup parameters for 20 MHz channel bandwidth.  The
 * structure is copied verbatim into the BOOTUP_PARAMS_REQUEST frame by
 * rsi_load_bootup_params(); multi-byte fields are little-endian on the
 * wire.  Only the first of the three clock-info entries enables the
 * switch clocks; the other two are zeroed templates. */
static struct bootup_params boot_params_20 = {
	.magic_number = cpu_to_le16(0x5aa5),
	.crystal_good_time = 0x0,
	.valid = cpu_to_le32(VALID_20),
	.reserved_for_valids = 0x0,
	.bootup_mode_info = 0x0,
	.digital_loop_back_params = 0x0,
	.rtls_timestamp_en = 0x0,
	.host_spi_intr_cfg = 0x0,
	.device_clk_info = {{
		.pll_config_g = {
			.tapll_info_g = {
				.pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
							 (TA_PLL_M_VAL_20)),
				.pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
			},
			.pll960_info_g = {
				.pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
							 (PLL960_N_VAL_20)),
				.pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
				.pll_reg_3 = 0x0,
			},
			.afepll_info_g = {
				.pll_reg = cpu_to_le16(0x9f0),
			}
		},
		.switch_clk_g = {
			.switch_clk_info = cpu_to_le16(BIT(3)),
			.bbp_lmac_clk_reg_val = cpu_to_le16(0x121),
			.umac_clock_reg_config = 0x0,
			.qspi_uart_clock_reg_config = 0x0
		}
	},
	{
		.pll_config_g = {
			.tapll_info_g = {
				.pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
							 (TA_PLL_M_VAL_20)),
				.pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
			},
			.pll960_info_g = {
				.pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
							 (PLL960_N_VAL_20)),
				.pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
				.pll_reg_3 = 0x0,
			},
			.afepll_info_g = {
				.pll_reg = cpu_to_le16(0x9f0),
			}
		},
		.switch_clk_g = {
			.switch_clk_info = 0x0,
			.bbp_lmac_clk_reg_val = 0x0,
			.umac_clock_reg_config = 0x0,
			.qspi_uart_clock_reg_config = 0x0
		}
	},
	{
		.pll_config_g = {
			.tapll_info_g = {
				.pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
							 (TA_PLL_M_VAL_20)),
				.pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
			},
			.pll960_info_g = {
				.pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
							 (PLL960_N_VAL_20)),
				.pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
				.pll_reg_3 = 0x0,
			},
			.afepll_info_g = {
				.pll_reg = cpu_to_le16(0x9f0),
			}
		},
		.switch_clk_g = {
			.switch_clk_info = 0x0,
			.bbp_lmac_clk_reg_val = 0x0,
			.umac_clock_reg_config = 0x0,
			.qspi_uart_clock_reg_config = 0x0
		}
	} },
	.buckboost_wakeup_cnt = 0x0,
	.pmu_wakeup_wait = 0x0,
	.shutdown_wait_time = 0x0,
	.pmu_slp_clkout_sel = 0x0,
	.wdt_prog_value = 0x0,
	.wdt_soc_rst_delay = 0x0,
	.dcdc_operation_mode = 0x0,
	.soc_reset_wait_cnt = 0x0
};
111
/* Default device bootup parameters for 40 MHz channel bandwidth — same
 * layout as boot_params_20 but with the 40 MHz PLL divider macros and
 * different switch-clock/UMAC clock register values in the first
 * clock-info entry. */
static struct bootup_params boot_params_40 = {
	.magic_number = cpu_to_le16(0x5aa5),
	.crystal_good_time = 0x0,
	.valid = cpu_to_le32(VALID_40),
	.reserved_for_valids = 0x0,
	.bootup_mode_info = 0x0,
	.digital_loop_back_params = 0x0,
	.rtls_timestamp_en = 0x0,
	.host_spi_intr_cfg = 0x0,
	.device_clk_info = {{
		.pll_config_g = {
			.tapll_info_g = {
				.pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
							 (TA_PLL_M_VAL_40)),
				.pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
			},
			.pll960_info_g = {
				.pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
							 (PLL960_N_VAL_40)),
				.pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
				.pll_reg_3 = 0x0,
			},
			.afepll_info_g = {
				.pll_reg = cpu_to_le16(0x9f0),
			}
		},
		.switch_clk_g = {
			.switch_clk_info = cpu_to_le16(0x09),
			.bbp_lmac_clk_reg_val = cpu_to_le16(0x1121),
			.umac_clock_reg_config = cpu_to_le16(0x48),
			.qspi_uart_clock_reg_config = 0x0
		}
	},
	{
		.pll_config_g = {
			.tapll_info_g = {
				.pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
							 (TA_PLL_M_VAL_40)),
				.pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
			},
			.pll960_info_g = {
				.pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
							 (PLL960_N_VAL_40)),
				.pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
				.pll_reg_3 = 0x0,
			},
			.afepll_info_g = {
				.pll_reg = cpu_to_le16(0x9f0),
			}
		},
		.switch_clk_g = {
			.switch_clk_info = 0x0,
			.bbp_lmac_clk_reg_val = 0x0,
			.umac_clock_reg_config = 0x0,
			.qspi_uart_clock_reg_config = 0x0
		}
	},
	{
		.pll_config_g = {
			.tapll_info_g = {
				.pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
							 (TA_PLL_M_VAL_40)),
				.pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
			},
			.pll960_info_g = {
				.pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
							 (PLL960_N_VAL_40)),
				.pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
				.pll_reg_3 = 0x0,
			},
			.afepll_info_g = {
				.pll_reg = cpu_to_le16(0x9f0),
			}
		},
		.switch_clk_g = {
			.switch_clk_info = 0x0,
			.bbp_lmac_clk_reg_val = 0x0,
			.umac_clock_reg_config = 0x0,
			.qspi_uart_clock_reg_config = 0x0
		}
	} },
	.buckboost_wakeup_cnt = 0x0,
	.pmu_wakeup_wait = 0x0,
	.shutdown_wait_time = 0x0,
	.pmu_slp_clkout_sel = 0x0,
	.wdt_prog_value = 0x0,
	.wdt_soc_rst_delay = 0x0,
	.dcdc_operation_mode = 0x0,
	.soc_reset_wait_cnt = 0x0
};
202
/* Per-MCS (HT MCS0-MCS7) rate values — presumably in units of 500 kbps
 * (13 == 6.5 Mbps ... 130 == 65 Mbps); TODO confirm against the users
 * of this table later in the file. */
static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
204
/**
 * rsi_set_default_parameters() - This function sets default parameters.
 * @common: Pointer to the driver private structure.
 *
 * Return: none
 */
static void rsi_set_default_parameters(struct rsi_common *common)
{
	common->band = IEEE80211_BAND_2GHZ;
	common->channel_width = BW_20MHZ;
	common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	common->channel = 1;
	/* 0xffff presumably means "no fixed minimum rate" — confirm
	 * against the rate-selection code. */
	common->min_rate = 0xffff;
	/* The FSM starts out waiting for the card-ready indication. */
	common->fsm_state = FSM_CARD_NOT_READY;
	common->iface_down = true;
}
221
222/**
223 * rsi_set_contention_vals() - This function sets the contention values for the
224 * backoff procedure.
225 * @common: Pointer to the driver private structure.
226 *
227 * Return: None.
228 */
229static void rsi_set_contention_vals(struct rsi_common *common)
230{
231 u8 ii = 0;
232
233 for (; ii < NUM_EDCA_QUEUES; ii++) {
234 common->tx_qinfo[ii].wme_params =
235 (((common->edca_params[ii].cw_min / 2) +
236 (common->edca_params[ii].aifs)) *
237 WMM_SHORT_SLOT_TIME + SIFS_DURATION);
238 common->tx_qinfo[ii].weight = common->tx_qinfo[ii].wme_params;
239 common->tx_qinfo[ii].pkt_contended = 0;
240 }
241}
242
243/**
244 * rsi_send_internal_mgmt_frame() - This function sends management frames to
245 * firmware.Also schedules packet to queue
246 * for transmission.
247 * @common: Pointer to the driver private structure.
248 * @skb: Pointer to the socket buffer structure.
249 *
250 * Return: 0 on success, -1 on failure.
251 */
252static int rsi_send_internal_mgmt_frame(struct rsi_common *common,
253 struct sk_buff *skb)
254{
255 struct skb_info *tx_params;
256
257 if (skb == NULL) {
258 rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__);
259 return -ENOMEM;
260 }
261 tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;
262 tx_params->flags |= INTERNAL_MGMT_PKT;
263 skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb);
264 rsi_set_event(&common->tx_thread.event);
265 return 0;
266}
267
/**
 * rsi_load_radio_caps() - This function is used to send radio capabilities
 *			   values to firmware.
 * @common: Pointer to the driver private structure.
 *
 * Return: 0 on success, corresponding negative error code on failure.
 */
static int rsi_load_radio_caps(struct rsi_common *common)
{
	struct rsi_radio_caps *radio_caps;
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_hw *hw = adapter->hw;
	u16 inx = 0;
	u8 ii;
	u8 radio_id = 0;
	/* Default per-rate table: 0xf0 for each of the 20 rates; only the
	 * low byte of each entry is sent (see gcpd_per_rate loop below). */
	u16 gc[20] = {0xf0, 0xf0, 0xf0, 0xf0,
		      0xf0, 0xf0, 0xf0, 0xf0,
		      0xf0, 0xf0, 0xf0, 0xf0,
		      0xf0, 0xf0, 0xf0, 0xf0,
		      0xf0, 0xf0, 0xf0, 0xf0};
	struct ieee80211_conf *conf = &hw->conf;
	struct sk_buff *skb;

	rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__);

	skb = dev_alloc_skb(sizeof(struct rsi_radio_caps));

	if (!skb) {
		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
			__func__);
		return -ENOMEM;
	}

	memset(skb->data, 0, sizeof(struct rsi_radio_caps));
	radio_caps = (struct rsi_radio_caps *)skb->data;

	radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES);
	radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8);

	if (common->channel_width == BW_40MHZ) {
		radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ);
		radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ);
		if (common->channel_width) {
			radio_caps->desc_word[5] =
				cpu_to_le16(common->channel_width << 12);
			radio_caps->desc_word[5] |= cpu_to_le16(FULL40M_ENABLE);
		}

		/* HT40- / HT40+ override word 5 entirely with the
		 * lower/upper 20 MHz selection. */
		if (conf_is_ht40_minus(conf)) {
			radio_caps->desc_word[5] = 0;
			radio_caps->desc_word[5] |=
				cpu_to_le16(LOWER_20_ENABLE);
			radio_caps->desc_word[5] |=
				cpu_to_le16(LOWER_20_ENABLE >> 12);
		}

		if (conf_is_ht40_plus(conf)) {
			radio_caps->desc_word[5] = 0;
			radio_caps->desc_word[5] |=
				cpu_to_le16(UPPER_20_ENABLE);
			radio_caps->desc_word[5] |=
				cpu_to_le16(UPPER_20_ENABLE >> 12);
		}
	}

	radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8);

	/* First give every hardware queue conservative defaults... */
	for (ii = 0; ii < MAX_HW_QUEUES; ii++) {
		radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3);
		radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f);
		radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(2);
		radio_caps->qos_params[ii].txop_q = 0;
	}

	/* ...then overwrite the EDCA-backed queues with the stored
	 * per-queue EDCA parameters. */
	for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) {
		radio_caps->qos_params[ii].cont_win_min_q =
			cpu_to_le16(common->edca_params[ii].cw_min);
		radio_caps->qos_params[ii].cont_win_max_q =
			cpu_to_le16(common->edca_params[ii].cw_max);
		radio_caps->qos_params[ii].aifsn_val_q =
			cpu_to_le16((common->edca_params[ii].aifs) << 8);
		radio_caps->qos_params[ii].txop_q =
			cpu_to_le16(common->edca_params[ii].txop);
	}

	/* 40 == sizeof(gc): cache all 20 u16 entries in rate_pwr, then
	 * emit only the low byte of each. */
	memcpy(&common->rate_pwr[0], &gc[0], 40);
	for (ii = 0; ii < 20; ii++)
		radio_caps->gcpd_per_rate[inx++] =
			cpu_to_le16(common->rate_pwr[ii] & 0x00FF);

	/* Word 0: payload length in bits 0-11, host queue in bits 12-15. */
	radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) -
						FRAME_DESC_SZ) |
					       (RSI_WIFI_MGMT_Q << 12));


	skb_put(skb, (sizeof(struct rsi_radio_caps)));

	return rsi_send_internal_mgmt_frame(common, skb);
}
367
368/**
369 * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module.
370 * @common: Pointer to the driver private structure.
371 * @msg: Pointer to received packet.
372 * @msg_len: Length of the recieved packet.
373 * @type: Type of recieved packet.
374 *
375 * Return: 0 on success, -1 on failure.
376 */
377static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
378 u8 *msg,
379 s32 msg_len,
380 u8 type)
381{
382 struct rsi_hw *adapter = common->priv;
383 struct ieee80211_tx_info *info;
384 struct skb_info *rx_params;
385 u8 pad_bytes = msg[4];
386 u8 pkt_recv;
387 struct sk_buff *skb;
388 char *buffer;
389
390 if (type == RX_DOT11_MGMT) {
391 if (!adapter->sc_nvifs)
392 return -ENOLINK;
393
394 msg_len -= pad_bytes;
395 if ((msg_len <= 0) || (!msg)) {
396 rsi_dbg(MGMT_RX_ZONE,
397 "%s: Invalid rx msg of len = %d\n",
398 __func__, msg_len);
399 return -EINVAL;
400 }
401
402 skb = dev_alloc_skb(msg_len);
403 if (!skb) {
404 rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n",
405 __func__);
406 return -ENOMEM;
407 }
408
409 buffer = skb_put(skb, msg_len);
410
411 memcpy(buffer,
412 (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
413 msg_len);
414
415 pkt_recv = buffer[0];
416
417 info = IEEE80211_SKB_CB(skb);
418 rx_params = (struct skb_info *)info->driver_data;
419 rx_params->rssi = rsi_get_rssi(msg);
420 rx_params->channel = rsi_get_channel(msg);
421 rsi_indicate_pkt_to_os(common, skb);
422 } else {
423 rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__);
424 }
425
426 return 0;
427}
428
/**
 * rsi_hal_send_sta_notify_frame() - This function sends the station notify
 *				     frame to firmware.
 * @common: Pointer to the driver private structure.
 * @opmode: Operating mode of device.
 * @notify_event: Notification about station connection.
 * @bssid: bssid.
 * @qos_enable: Qos is enabled.
 * @aid: Aid (unique for all STA).
 *
 * Return: status: 0 on success, corresponding negative error code on failure.
 */
static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
					 u8 opmode,
					 u8 notify_event,
					 const unsigned char *bssid,
					 u8 qos_enable,
					 u16 aid)
{
	struct sk_buff *skb = NULL;
	struct rsi_peer_notify *peer_notify;
	u16 vap_id = 0;
	int status;

	rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__);

	skb = dev_alloc_skb(sizeof(struct rsi_peer_notify));

	if (!skb) {
		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
			__func__);
		return -ENOMEM;
	}

	memset(skb->data, 0, sizeof(struct rsi_peer_notify));
	peer_notify = (struct rsi_peer_notify *)skb->data;

	/* command packs opmode starting at bit 1 and the AID at bits 4-15;
	 * RSI_ADD_PEER/RSI_DELETE_PEER presumably occupy the remaining low
	 * bit(s) — confirm against the firmware interface definition. */
	peer_notify->command = cpu_to_le16(opmode << 1);

	switch (notify_event) {
	case STA_CONNECTED:
		peer_notify->command |= cpu_to_le16(RSI_ADD_PEER);
		break;
	case STA_DISCONNECTED:
		peer_notify->command |= cpu_to_le16(RSI_DELETE_PEER);
		break;
	default:
		/* Other events leave the add/delete bits untouched. */
		break;
	}

	peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4);
	ether_addr_copy(peer_notify->mac_addr, bssid);

	peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0);

	/* Word 0: payload length in bits 0-11, host queue in bits 12-15. */
	peer_notify->desc_word[0] =
		cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) |
			    (RSI_WIFI_MGMT_Q << 12));
	peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY);
	peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8);

	skb_put(skb, sizeof(struct rsi_peer_notify));

	status = rsi_send_internal_mgmt_frame(common, skb);

	/* A successful QoS association also pushes the contention values
	 * and refreshed radio capabilities to the firmware. */
	if (!status && qos_enable) {
		rsi_set_contention_vals(common);
		status = rsi_load_radio_caps(common);
	}
	return status;
}
500
501/**
502 * rsi_send_aggregation_params_frame() - This function sends the ampdu
503 * indication frame to firmware.
504 * @common: Pointer to the driver private structure.
505 * @tid: traffic identifier.
506 * @ssn: ssn.
507 * @buf_size: buffer size.
508 * @event: notification about station connection.
509 *
510 * Return: 0 on success, corresponding negative error code on failure.
511 */
512int rsi_send_aggregation_params_frame(struct rsi_common *common,
513 u16 tid,
514 u16 ssn,
515 u8 buf_size,
516 u8 event)
517{
518 struct sk_buff *skb = NULL;
519 struct rsi_mac_frame *mgmt_frame;
520 u8 peer_id = 0;
521
522 skb = dev_alloc_skb(FRAME_DESC_SZ);
523
524 if (!skb) {
525 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
526 __func__);
527 return -ENOMEM;
528 }
529
530 memset(skb->data, 0, FRAME_DESC_SZ);
531 mgmt_frame = (struct rsi_mac_frame *)skb->data;
532
533 rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__);
534
535 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
536 mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND);
537
538 if (event == STA_TX_ADDBA_DONE) {
539 mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
540 mgmt_frame->desc_word[5] = cpu_to_le16(buf_size);
541 mgmt_frame->desc_word[7] =
542 cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8)));
543 } else if (event == STA_RX_ADDBA_DONE) {
544 mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
545 mgmt_frame->desc_word[7] = cpu_to_le16(tid |
546 (START_AMPDU_AGGR << 4) |
547 (RX_BA_INDICATION << 5) |
548 (peer_id << 8));
549 } else if (event == STA_TX_DELBA) {
550 mgmt_frame->desc_word[7] = cpu_to_le16(tid |
551 (STOP_AMPDU_AGGR << 4) |
552 (peer_id << 8));
553 } else if (event == STA_RX_DELBA) {
554 mgmt_frame->desc_word[7] = cpu_to_le16(tid |
555 (STOP_AMPDU_AGGR << 4) |
556 (RX_BA_INDICATION << 5) |
557 (peer_id << 8));
558 }
559
560 skb_put(skb, FRAME_DESC_SZ);
561
562 return rsi_send_internal_mgmt_frame(common, skb);
563}
564
/**
 * rsi_program_bb_rf() - This function starts base band and RF programming.
 *			 This is called after initial configurations are done.
 * @common: Pointer to the driver private structure.
 *
 * Return: 0 on success, corresponding negative error code on failure.
 */
static int rsi_program_bb_rf(struct rsi_common *common)
{
	struct sk_buff *skb;
	struct rsi_mac_frame *mgmt_frame;

	rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__);

	skb = dev_alloc_skb(FRAME_DESC_SZ);
	if (!skb) {
		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
			__func__);
		return -ENOMEM;
	}

	memset(skb->data, 0, FRAME_DESC_SZ);
	mgmt_frame = (struct rsi_mac_frame *)skb->data;

	mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
	mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA);
	mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint << 8);

	/* One-shot RF reset request: the flag is consumed here so the
	 * next call will not re-issue it. */
	if (common->rf_reset) {
		mgmt_frame->desc_word[7] = cpu_to_le16(RF_RESET_ENABLE);
		rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n",
			__func__);
		common->rf_reset = 0;
	}
	/* bb_rf_prog_count is (re)set to 1 on every programming pass. */
	common->bb_rf_prog_count = 1;
	mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET |
				    BBP_REG_WRITE | (RSI_RF_TYPE << 4));
	skb_put(skb, FRAME_DESC_SZ);

	return rsi_send_internal_mgmt_frame(common, skb);
}
606
/**
 * rsi_set_vap_capabilities() - This function send vap capability to firmware.
 * @common: Pointer to the driver private structure.
 * @mode: Operating mode of device.
 *
 * Return: 0 on success, corresponding negative error code on failure.
 */
int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
{
	struct sk_buff *skb = NULL;
	struct rsi_vap_caps *vap_caps;
	u16 vap_id = 0;

	rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__);

	skb = dev_alloc_skb(sizeof(struct rsi_vap_caps));
	if (!skb) {
		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
			__func__);
		return -ENOMEM;
	}

	memset(skb->data, 0, sizeof(struct rsi_vap_caps));
	vap_caps = (struct rsi_vap_caps *)skb->data;

	/* Word 0: payload length in bits 0-11, host queue in bits 12-15. */
	vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) -
					     FRAME_DESC_SZ) |
					     (RSI_WIFI_MGMT_Q << 12));
	vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES);
	vap_caps->desc_word[4] = cpu_to_le16(mode |
					     (common->channel_width << 8));
	vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) |
					     (common->mac_id << 4) |
					     common->radio_id);

	memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN);
	vap_caps->keep_alive_period = cpu_to_le16(90);
	vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD);

	vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
	vap_caps->default_mgmt_rate = 0;
	/* In HT40 the control-rate word also carries the full-40 MHz flag
	 * in its upper half. */
	if (conf_is_ht40(&common->priv->hw->conf)) {
		vap_caps->default_ctrl_rate =
			cpu_to_le32(RSI_RATE_6 | FULL40M_ENABLE << 16);
	} else {
		vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
	}
	vap_caps->default_data_rate = 0;
	vap_caps->beacon_interval = cpu_to_le16(200);
	vap_caps->dtim_period = cpu_to_le16(4);

	skb_put(skb, sizeof(*vap_caps));

	return rsi_send_internal_mgmt_frame(common, skb);
}
662
/**
 * rsi_hal_load_key() - This function is used to load keys within the firmware.
 * @common: Pointer to the driver private structure.
 * @data: Pointer to the key data.
 * @key_len: Key length to be loaded.
 * @key_type: Type of key: GROUP/PAIRWISE.
 * @key_id: Key index.
 * @cipher: Type of cipher used.
 *
 * Return: 0 on success, -1 on failure.
 */
int rsi_hal_load_key(struct rsi_common *common,
		     u8 *data,
		     u16 key_len,
		     u8 key_type,
		     u8 key_id,
		     u32 cipher)
{
	struct sk_buff *skb = NULL;
	struct rsi_set_key *set_key;
	u16 key_descriptor = 0;

	rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__);

	skb = dev_alloc_skb(sizeof(struct rsi_set_key));
	if (!skb) {
		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
			__func__);
		return -ENOMEM;
	}

	memset(skb->data, 0, sizeof(struct rsi_set_key));
	set_key = (struct rsi_set_key *)skb->data;

	/* Build the key descriptor flags; exact bit meanings come from
	 * the firmware interface (not visible here). */
	if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
	    (cipher == WLAN_CIPHER_SUITE_WEP104)) {
		/* WEP: length is bumped by one; BIT(3) presumably selects
		 * the 104-bit variant — confirm. */
		key_len += 1;
		key_descriptor |= BIT(2);
		if (key_len >= 13)
			key_descriptor |= BIT(3);
	} else if (cipher != KEY_TYPE_CLEAR) {
		key_descriptor |= BIT(4);
		if (key_type == RSI_PAIRWISE_KEY)
			key_id = 0;
		if (cipher == WLAN_CIPHER_SUITE_TKIP)
			key_descriptor |= BIT(5);
	}
	key_descriptor |= (key_type | BIT(13) | (key_id << 14));

	set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) -
					    FRAME_DESC_SZ) |
					    (RSI_WIFI_MGMT_Q << 12));
	set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ);
	set_key->desc_word[4] = cpu_to_le16(key_descriptor);

	if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
	    (cipher == WLAN_CIPHER_SUITE_WEP104)) {
		/* NOTE(review): copying key_len * 2 bytes into
		 * key[key_id] starting at offset 1 looks suspect (double
		 * length, off-by-one destination) — verify against the
		 * firmware's WEP key record layout. */
		memcpy(&set_key->key[key_id][1],
		       data,
		       key_len * 2);
	} else {
		memcpy(&set_key->key[0][0], data, key_len);
	}

	/* NOTE(review): reads data[16..31] unconditionally, so the caller
	 * must always supply at least 32 bytes even for non-TKIP ciphers
	 * — confirm all call sites. */
	memcpy(set_key->tx_mic_key, &data[16], 8);
	memcpy(set_key->rx_mic_key, &data[24], 8);

	skb_put(skb, sizeof(struct rsi_set_key));

	return rsi_send_internal_mgmt_frame(common, skb);
}
734
735/*
736 * rsi_load_bootup_params() - This function send bootup params to the firmware.
737 * @common: Pointer to the driver private structure.
738 *
739 * Return: 0 on success, corresponding error code on failure.
740 */
741static u8 rsi_load_bootup_params(struct rsi_common *common)
742{
743 struct sk_buff *skb;
744 struct rsi_boot_params *boot_params;
745
746 rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
747 skb = dev_alloc_skb(sizeof(struct rsi_boot_params));
748 if (!skb) {
749 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
750 __func__);
751 return -ENOMEM;
752 }
753
754 memset(skb->data, 0, sizeof(struct rsi_boot_params));
755 boot_params = (struct rsi_boot_params *)skb->data;
756
757 rsi_dbg(MGMT_TX_ZONE, "%s:\n", __func__);
758
759 if (common->channel_width == BW_40MHZ) {
760 memcpy(&boot_params->bootup_params,
761 &boot_params_40,
762 sizeof(struct bootup_params));
763 rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
764 UMAC_CLK_40BW);
765 boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40BW);
766 } else {
767 memcpy(&boot_params->bootup_params,
768 &boot_params_20,
769 sizeof(struct bootup_params));
770 if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
771 boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_20BW);
772 rsi_dbg(MGMT_TX_ZONE,
773 "%s: Packet 20MHZ <=== %d\n", __func__,
774 UMAC_CLK_20BW);
775 } else {
776 boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40MHZ);
777 rsi_dbg(MGMT_TX_ZONE,
778 "%s: Packet 20MHZ <=== %d\n", __func__,
779 UMAC_CLK_40MHZ);
780 }
781 }
782
783 /**
784 * Bit{0:11} indicates length of the Packet
785 * Bit{12:15} indicates host queue number
786 */
787 boot_params->desc_word[0] = cpu_to_le16(sizeof(struct bootup_params) |
788 (RSI_WIFI_MGMT_Q << 12));
789 boot_params->desc_word[1] = cpu_to_le16(BOOTUP_PARAMS_REQUEST);
790
791 skb_put(skb, sizeof(struct rsi_boot_params));
792
793 return rsi_send_internal_mgmt_frame(common, skb);
794}
795
796/**
797 * rsi_send_reset_mac() - This function prepares reset MAC request and sends an
798 * internal management frame to indicate it to firmware.
799 * @common: Pointer to the driver private structure.
800 *
801 * Return: 0 on success, corresponding error code on failure.
802 */
803static int rsi_send_reset_mac(struct rsi_common *common)
804{
805 struct sk_buff *skb;
806 struct rsi_mac_frame *mgmt_frame;
807
808 rsi_dbg(MGMT_TX_ZONE, "%s: Sending reset MAC frame\n", __func__);
809
810 skb = dev_alloc_skb(FRAME_DESC_SZ);
811 if (!skb) {
812 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
813 __func__);
814 return -ENOMEM;
815 }
816
817 memset(skb->data, 0, FRAME_DESC_SZ);
818 mgmt_frame = (struct rsi_mac_frame *)skb->data;
819
820 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
821 mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ);
822 mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8);
823
824 skb_put(skb, FRAME_DESC_SZ);
825
826 return rsi_send_internal_mgmt_frame(common, skb);
827}
828
829/**
830 * rsi_set_channel() - This function programs the channel.
831 * @common: Pointer to the driver private structure.
832 * @channel: Channel value to be set.
833 *
834 * Return: 0 on success, corresponding error code on failure.
835 */
836int rsi_set_channel(struct rsi_common *common, u16 channel)
837{
838 struct sk_buff *skb = NULL;
839 struct rsi_mac_frame *mgmt_frame;
840
841 rsi_dbg(MGMT_TX_ZONE,
842 "%s: Sending scan req frame\n", __func__);
843
844 skb = dev_alloc_skb(FRAME_DESC_SZ);
845 if (!skb) {
846 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
847 __func__);
848 return -ENOMEM;
849 }
850
851 memset(skb->data, 0, FRAME_DESC_SZ);
852 mgmt_frame = (struct rsi_mac_frame *)skb->data;
853
854 if (common->band == IEEE80211_BAND_5GHZ) {
855 if ((channel >= 36) && (channel <= 64))
856 channel = ((channel - 32) / 4);
857 else if ((channel > 64) && (channel <= 140))
858 channel = ((channel - 102) / 4) + 8;
859 else if (channel >= 149)
860 channel = ((channel - 151) / 4) + 18;
861 else
862 return -EINVAL;
863 } else {
864 if (channel > 14) {
865 rsi_dbg(ERR_ZONE, "%s: Invalid chno %d, band = %d\n",
866 __func__, channel, common->band);
867 return -EINVAL;
868 }
869 }
870
871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
873 mgmt_frame->desc_word[4] = cpu_to_le16(channel);
874
875 mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET |
876 BBP_REG_WRITE |
877 (RSI_RF_TYPE << 4));
878
879 mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
880
881 if (common->channel_width == BW_40MHZ)
882 mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
883
884 common->channel = channel;
885
886 skb_put(skb, FRAME_DESC_SZ);
887
888 return rsi_send_internal_mgmt_frame(common, skb);
889}
890
891/**
892 * rsi_compare() - This function is used to compare two integers
893 * @a: pointer to the first integer
894 * @b: pointer to the second integer
895 *
896 * Return: 0 if both are equal, -1 if the first is smaller, else 1
897 */
898static int rsi_compare(const void *a, const void *b)
899{
900 u16 _a = *(const u16 *)(a);
901 u16 _b = *(const u16 *)(b);
902
903 if (_a > _b)
904 return -1;
905
906 if (_a < _b)
907 return 1;
908
909 return 0;
910}
911
912/**
913 * rsi_map_rates() - This function is used to map selected rates to hw rates.
914 * @rate: The standard rate to be mapped.
915 * @offset: Offset that will be returned.
916 *
917 * Return: 0 if it is a mcs rate, else 1
918 */
919static bool rsi_map_rates(u16 rate, int *offset)
920{
921 int kk;
922 for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) {
923 if (rate == mcs[kk]) {
924 *offset = kk;
925 return false;
926 }
927 }
928
929 for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) {
930 if (rate == rsi_rates[kk].bitrate / 5) {
931 *offset = kk;
932 break;
933 }
934 }
935 return true;
936}
937
938/**
939 * rsi_send_auto_rate_request() - This function is to set rates for connection
940 * and send autorate request to firmware.
941 * @common: Pointer to the driver private structure.
942 *
943 * Return: 0 on success, corresponding error code on failure.
944 */
945static int rsi_send_auto_rate_request(struct rsi_common *common)
946{
947 struct sk_buff *skb;
948 struct rsi_auto_rate *auto_rate;
949 int ii = 0, jj = 0, kk = 0;
950 struct ieee80211_hw *hw = common->priv->hw;
951 u8 band = hw->conf.chandef.chan->band;
952 u8 num_supported_rates = 0;
953 u8 rate_offset = 0;
954 u32 rate_bitmap = common->bitrate_mask[band];
955
956 u16 *selected_rates, min_rate;
957
958 skb = dev_alloc_skb(sizeof(struct rsi_auto_rate));
959 if (!skb) {
960 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
961 __func__);
962 return -ENOMEM;
963 }
964
965 selected_rates = kmalloc(2 * RSI_TBL_SZ, GFP_KERNEL);
966 if (!selected_rates) {
967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
968 __func__);
969 return -ENOMEM;
970 }
971
972 memset(skb->data, 0, sizeof(struct rsi_auto_rate));
973 memset(selected_rates, 0, 2 * RSI_TBL_SZ);
974
975 auto_rate = (struct rsi_auto_rate *)skb->data;
976
977 auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f));
978 auto_rate->collision_tolerance = cpu_to_le16(3);
979 auto_rate->failure_limit = cpu_to_le16(3);
980 auto_rate->initial_boundary = cpu_to_le16(3);
981 auto_rate->max_threshold_limt = cpu_to_le16(27);
982
983 auto_rate->desc_word[1] = cpu_to_le16(AUTO_RATE_IND);
984
985 if (common->channel_width == BW_40MHZ)
986 auto_rate->desc_word[7] |= cpu_to_le16(1);
987
988 if (band == IEEE80211_BAND_2GHZ)
989 min_rate = STD_RATE_01;
990 else
991 min_rate = STD_RATE_06;
992
993 for (ii = 0, jj = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
994 if (rate_bitmap & BIT(ii)) {
995 selected_rates[jj++] = (rsi_rates[ii].bitrate / 5);
996 rate_offset++;
997 }
998 }
999 num_supported_rates = jj;
1000
1001 if (common->vif_info[0].is_ht) {
1002 for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
1003 selected_rates[jj++] = mcs[ii];
1004 num_supported_rates += ARRAY_SIZE(mcs);
1005 rate_offset += ARRAY_SIZE(mcs);
1006 }
1007
1008 if (rate_offset < (RSI_TBL_SZ / 2) - 1) {
1009 for (ii = jj; ii < (RSI_TBL_SZ / 2); ii++) {
1010 selected_rates[jj++] = min_rate;
1011 rate_offset++;
1012 }
1013 }
1014
1015 sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
1016
1017 /* mapping the rates to RSI rates */
1018 for (ii = 0; ii < jj; ii++) {
1019 if (rsi_map_rates(selected_rates[ii], &kk)) {
1020 auto_rate->supported_rates[ii] =
1021 cpu_to_le16(rsi_rates[kk].hw_value);
1022 } else {
1023 auto_rate->supported_rates[ii] =
1024 cpu_to_le16(rsi_mcsrates[kk]);
1025 }
1026 }
1027
1028 /* loading HT rates in the bottom half of the auto rate table */
1029 if (common->vif_info[0].is_ht) {
1030 if (common->vif_info[0].sgi)
1031 auto_rate->supported_rates[rate_offset++] =
1032 cpu_to_le16(RSI_RATE_MCS7_SG);
1033
1034 for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1;
1035 ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) {
1036 if (common->vif_info[0].sgi)
1037 auto_rate->supported_rates[ii++] =
1038 cpu_to_le16(rsi_mcsrates[kk] | BIT(9));
1039 auto_rate->supported_rates[ii] =
1040 cpu_to_le16(rsi_mcsrates[kk--]);
1041 }
1042
1043 for (; ii < RSI_TBL_SZ; ii++) {
1044 auto_rate->supported_rates[ii] =
1045 cpu_to_le16(rsi_mcsrates[0]);
1046 }
1047 }
1048
1049 auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2);
1050 auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2);
1051 auto_rate->desc_word[7] |= cpu_to_le16(0 << 8);
1052 num_supported_rates *= 2;
1053
1054 auto_rate->desc_word[0] = cpu_to_le16((sizeof(*auto_rate) -
1055 FRAME_DESC_SZ) |
1056 (RSI_WIFI_MGMT_Q << 12));
1057
1058 skb_put(skb,
1059 sizeof(struct rsi_auto_rate));
1060 kfree(selected_rates);
1061
1062 return rsi_send_internal_mgmt_frame(common, skb);
1063}
1064
1065/**
1066 * rsi_inform_bss_status() - This function informs about bss status with the
1067 * help of sta notify params by sending an internal
1068 * management frame to firmware.
1069 * @common: Pointer to the driver private structure.
1070 * @status: Bss status type.
1071 * @bssid: Bssid.
1072 * @qos_enable: Qos is enabled.
1073 * @aid: Aid (unique for all STAs).
1074 *
1075 * Return: None.
1076 */
1077void rsi_inform_bss_status(struct rsi_common *common,
1078 u8 status,
1079 const unsigned char *bssid,
1080 u8 qos_enable,
1081 u16 aid)
1082{
1083 if (status) {
1084 rsi_hal_send_sta_notify_frame(common,
1085 NL80211_IFTYPE_STATION,
1086 STA_CONNECTED,
1087 bssid,
1088 qos_enable,
1089 aid);
1090 if (common->min_rate == 0xffff)
1091 rsi_send_auto_rate_request(common);
1092 } else {
1093 rsi_hal_send_sta_notify_frame(common,
1094 NL80211_IFTYPE_STATION,
1095 STA_DISCONNECTED,
1096 bssid,
1097 qos_enable,
1098 aid);
1099 }
1100}
1101
1102/**
1103 * rsi_eeprom_read() - This function sends a frame to read the mac address
1104 * from the eeprom.
1105 * @common: Pointer to the driver private structure.
1106 *
1107 * Return: 0 on success, -1 on failure.
1108 */
1109static int rsi_eeprom_read(struct rsi_common *common)
1110{
1111 struct rsi_mac_frame *mgmt_frame;
1112 struct sk_buff *skb;
1113
1114 rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__);
1115
1116 skb = dev_alloc_skb(FRAME_DESC_SZ);
1117 if (!skb) {
1118 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
1119 __func__);
1120 return -ENOMEM;
1121 }
1122
1123 memset(skb->data, 0, FRAME_DESC_SZ);
1124 mgmt_frame = (struct rsi_mac_frame *)skb->data;
1125
1126 /* FrameType */
1127 mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE);
1128 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
1129 /* Number of bytes to read */
1130 mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN +
1131 WLAN_MAC_MAGIC_WORD_LEN +
1132 WLAN_HOST_MODE_LEN +
1133 WLAN_FW_VERSION_LEN);
1134 /* Address to read */
1135 mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR);
1136
1137 skb_put(skb, FRAME_DESC_SZ);
1138
1139 return rsi_send_internal_mgmt_frame(common, skb);
1140}
1141
1142/**
1143 * rsi_handle_ta_confirm_type() - This function handles the confirm frames.
1144 * @common: Pointer to the driver private structure.
1145 * @msg: Pointer to received packet.
1146 *
1147 * Return: 0 on success, -1 on failure.
1148 */
1149static int rsi_handle_ta_confirm_type(struct rsi_common *common,
1150 u8 *msg)
1151{
1152 u8 sub_type = (msg[15] & 0xff);
1153
1154 switch (sub_type) {
1155 case BOOTUP_PARAMS_REQUEST:
1156 rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n",
1157 __func__);
1158 if (common->fsm_state == FSM_BOOT_PARAMS_SENT) {
1159 if (rsi_eeprom_read(common)) {
1160 common->fsm_state = FSM_CARD_NOT_READY;
1161 goto out;
1162 } else {
1163 common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
1164 }
1165 } else {
1166 rsi_dbg(ERR_ZONE,
1167 "%s: Received bootup params cfm in %d state\n",
1168 __func__, common->fsm_state);
1169 return 0;
1170 }
1171 break;
1172
1173 case EEPROM_READ_TYPE:
1174 if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) {
1175 if (msg[16] == MAGIC_WORD) {
1176 u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN
1177 + WLAN_MAC_MAGIC_WORD_LEN);
1178 memcpy(common->mac_addr,
1179 &msg[offset],
1180 ETH_ALEN);
1181 memcpy(&common->fw_ver,
1182 &msg[offset + ETH_ALEN],
1183 sizeof(struct version_info));
1184
1185 } else {
1186 common->fsm_state = FSM_CARD_NOT_READY;
1187 break;
1188 }
1189 if (rsi_send_reset_mac(common))
1190 goto out;
1191 else
1192 common->fsm_state = FSM_RESET_MAC_SENT;
1193 } else {
1194 rsi_dbg(ERR_ZONE,
1195 "%s: Received eeprom mac addr in %d state\n",
1196 __func__, common->fsm_state);
1197 return 0;
1198 }
1199 break;
1200
1201 case RESET_MAC_REQ:
1202 if (common->fsm_state == FSM_RESET_MAC_SENT) {
1203 rsi_dbg(FSM_ZONE, "%s: Reset MAC cfm received\n",
1204 __func__);
1205
1206 if (rsi_load_radio_caps(common))
1207 goto out;
1208 else
1209 common->fsm_state = FSM_RADIO_CAPS_SENT;
1210 } else {
1211 rsi_dbg(ERR_ZONE,
1212 "%s: Received reset mac cfm in %d state\n",
1213 __func__, common->fsm_state);
1214 return 0;
1215 }
1216 break;
1217
1218 case RADIO_CAPABILITIES:
1219 if (common->fsm_state == FSM_RADIO_CAPS_SENT) {
1220 common->rf_reset = 1;
1221 if (rsi_program_bb_rf(common)) {
1222 goto out;
1223 } else {
1224 common->fsm_state = FSM_BB_RF_PROG_SENT;
1225 rsi_dbg(FSM_ZONE, "%s: Radio cap cfm received\n",
1226 __func__);
1227 }
1228 } else {
1229 rsi_dbg(ERR_ZONE,
1230 "%s: Received radio caps cfm in %d state\n",
1231 __func__, common->fsm_state);
1232 return 0;
1233 }
1234 break;
1235
1236 case BB_PROG_VALUES_REQUEST:
1237 case RF_PROG_VALUES_REQUEST:
1238 case BBP_PROG_IN_TA:
1239 rsi_dbg(FSM_ZONE, "%s: BB/RF cfm received\n", __func__);
1240 if (common->fsm_state == FSM_BB_RF_PROG_SENT) {
1241 common->bb_rf_prog_count--;
1242 if (!common->bb_rf_prog_count) {
1243 common->fsm_state = FSM_MAC_INIT_DONE;
1244 return rsi_mac80211_attach(common);
1245 }
1246 } else {
1247 goto out;
1248 }
1249 break;
1250
1251 default:
1252 rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n",
1253 __func__);
1254 break;
1255 }
1256 return 0;
1257out:
1258 rsi_dbg(ERR_ZONE, "%s: Unable to send pkt/Invalid frame received\n",
1259 __func__);
1260 return -EINVAL;
1261}
1262
1263/**
1264 * rsi_mgmt_pkt_recv() - This function processes the management packets
1265 * recieved from the hardware.
1266 * @common: Pointer to the driver private structure.
1267 * @msg: Pointer to the received packet.
1268 *
1269 * Return: 0 on success, -1 on failure.
1270 */
1271int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
1272{
1273 s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
1274 u16 msg_type = (msg[2]);
1275
1276 rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
1277 __func__, msg_len, msg_type);
1278
1279 if (msg_type == TA_CONFIRM_TYPE) {
1280 return rsi_handle_ta_confirm_type(common, msg);
1281 } else if (msg_type == CARD_READY_IND) {
1282 rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
1283 __func__);
1284 if (common->fsm_state == FSM_CARD_NOT_READY) {
1285 rsi_set_default_parameters(common);
1286
1287 if (rsi_load_bootup_params(common))
1288 return -ENOMEM;
1289 else
1290 common->fsm_state = FSM_BOOT_PARAMS_SENT;
1291 } else {
1292 return -EINVAL;
1293 }
1294 } else if (msg_type == TX_STATUS_IND) {
1295 if (msg[15] == PROBEREQ_CONFIRM) {
1296 common->mgmt_q_block = false;
1297 rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n",
1298 __func__);
1299 }
1300 } else {
1301 return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type);
1302 }
1303 return 0;
1304}
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
new file mode 100644
index 000000000000..8e48e72bae20
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -0,0 +1,196 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "rsi_mgmt.h"
18
19/**
20 * rsi_send_data_pkt() - This function sends the recieved data packet from
21 * driver to device.
22 * @common: Pointer to the driver private structure.
23 * @skb: Pointer to the socket buffer structure.
24 *
25 * Return: status: 0 on success, -1 on failure.
26 */
27int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
28{
29 struct rsi_hw *adapter = common->priv;
30 struct ieee80211_hdr *tmp_hdr = NULL;
31 struct ieee80211_tx_info *info;
32 struct skb_info *tx_params;
33 struct ieee80211_bss_conf *bss = NULL;
34 int status = -EINVAL;
35 u8 ieee80211_size = MIN_802_11_HDR_LEN;
36 u8 extnd_size = 0;
37 __le16 *frame_desc;
38 u16 seq_num = 0;
39
40 info = IEEE80211_SKB_CB(skb);
41 bss = &info->control.vif->bss_conf;
42 tx_params = (struct skb_info *)info->driver_data;
43
44 if (!bss->assoc)
45 goto err;
46
47 tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
48 seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
49
50 extnd_size = ((uintptr_t)skb->data & 0x3);
51
52 if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) {
53 rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
54 status = -ENOSPC;
55 goto err;
56 }
57
58 skb_push(skb, (FRAME_DESC_SZ + extnd_size));
59 frame_desc = (__le16 *)&skb->data[0];
60 memset((u8 *)frame_desc, 0, FRAME_DESC_SZ);
61
62 if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
63 ieee80211_size += 2;
64 frame_desc[6] |= cpu_to_le16(BIT(12));
65 }
66
67 if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
68 (common->secinfo.security_enable)) {
69 if (rsi_is_cipher_wep(common))
70 ieee80211_size += 4;
71 else
72 ieee80211_size += 8;
73 frame_desc[6] |= cpu_to_le16(BIT(15));
74 }
75
76 frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
77 (RSI_WIFI_DATA_Q << 12));
78 frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8);
79
80 if (common->min_rate != 0xffff) {
81 /* Send fixed rate */
82 frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
83 frame_desc[4] = cpu_to_le16(common->min_rate);
84 }
85
86 frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
87 frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) |
88 (skb->priority & 0xf) |
89 (tx_params->sta_id << 8));
90
91 status = adapter->host_intf_write_pkt(common->priv,
92 skb->data,
93 skb->len);
94 if (status)
95 rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n",
96 __func__);
97
98err:
99 ++common->tx_stats.total_tx_pkt_freed[skb->priority];
100 rsi_indicate_tx_status(common->priv, skb, status);
101 return status;
102}
103
104/**
105 * rsi_send_mgmt_pkt() - This functions sends the received management packet
106 * from driver to device.
107 * @common: Pointer to the driver private structure.
108 * @skb: Pointer to the socket buffer structure.
109 *
110 * Return: status: 0 on success, -1 on failure.
111 */
112int rsi_send_mgmt_pkt(struct rsi_common *common,
113 struct sk_buff *skb)
114{
115 struct rsi_hw *adapter = common->priv;
116 struct ieee80211_hdr *wh = NULL;
117 struct ieee80211_tx_info *info;
118 struct ieee80211_bss_conf *bss = NULL;
119 struct skb_info *tx_params;
120 int status = -E2BIG;
121 __le16 *msg = NULL;
122 u8 extnd_size = 0;
123 u8 vap_id = 0;
124
125 info = IEEE80211_SKB_CB(skb);
126 tx_params = (struct skb_info *)info->driver_data;
127 extnd_size = ((uintptr_t)skb->data & 0x3);
128
129 if (tx_params->flags & INTERNAL_MGMT_PKT) {
130 if ((extnd_size) > skb_headroom(skb)) {
131 rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
132 dev_kfree_skb(skb);
133 return -ENOSPC;
134 }
135 skb_push(skb, extnd_size);
136 skb->data[extnd_size + 4] = extnd_size;
137 status = adapter->host_intf_write_pkt(common->priv,
138 (u8 *)skb->data,
139 skb->len);
140 if (status) {
141 rsi_dbg(ERR_ZONE,
142 "%s: Failed to write the packet\n", __func__);
143 }
144 dev_kfree_skb(skb);
145 return status;
146 }
147
148 bss = &info->control.vif->bss_conf;
149 wh = (struct ieee80211_hdr *)&skb->data[0];
150
151 if (FRAME_DESC_SZ > skb_headroom(skb))
152 goto err;
153
154 skb_push(skb, FRAME_DESC_SZ);
155 memset(skb->data, 0, FRAME_DESC_SZ);
156 msg = (__le16 *)skb->data;
157
158 if (skb->len > MAX_MGMT_PKT_SIZE) {
159 rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__);
160 goto err;
161 }
162
163 msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
164 (RSI_WIFI_MGMT_Q << 12));
165 msg[1] = cpu_to_le16(TX_DOT11_MGMT);
166 msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8);
167 msg[3] = cpu_to_le16(RATE_INFO_ENABLE);
168 msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4);
169
170 if (wh->addr1[0] & BIT(0))
171 msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
172
173 if (common->band == IEEE80211_BAND_2GHZ)
174 msg[4] = cpu_to_le16(RSI_11B_MODE);
175 else
176 msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
177
178 /* Indicate to firmware to give cfm */
179 if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
180 msg[1] |= cpu_to_le16(BIT(10));
181 msg[7] = cpu_to_le16(PROBEREQ_CONFIRM);
182 common->mgmt_q_block = true;
183 }
184
185 msg[7] |= cpu_to_le16(vap_id << 8);
186
187 status = adapter->host_intf_write_pkt(common->priv,
188 (u8 *)msg,
189 skb->len);
190 if (status)
191 rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
192
193err:
194 rsi_indicate_tx_status(common->priv, skb, status);
195 return status;
196}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
new file mode 100644
index 000000000000..852453f386e2
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -0,0 +1,850 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 *
16 */
17
18#include <linux/module.h>
19#include "rsi_sdio.h"
20#include "rsi_common.h"
21
22/**
23 * rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
24 * @rw: Read/write
25 * @func: function number
26 * @raw: indicates whether to perform read after write
27 * @address: address to which to read/write
28 * @writedata: data to write
29 *
30 * Return: argument
31 */
32static u32 rsi_sdio_set_cmd52_arg(bool rw,
33 u8 func,
34 u8 raw,
35 u32 address,
36 u8 writedata)
37{
38 return ((rw & 1) << 31) | ((func & 0x7) << 28) |
39 ((raw & 1) << 27) | (1 << 26) |
40 ((address & 0x1FFFF) << 9) | (1 << 8) |
41 (writedata & 0xFF);
42}
43
44/**
45 * rsi_cmd52writebyte() - This function issues cmd52 byte write onto the card.
46 * @card: Pointer to the mmc_card.
47 * @address: Address to write.
48 * @byte: Data to write.
49 *
50 * Return: Write status.
51 */
52static int rsi_cmd52writebyte(struct mmc_card *card,
53 u32 address,
54 u8 byte)
55{
56 struct mmc_command io_cmd;
57 u32 arg;
58
59 memset(&io_cmd, 0, sizeof(io_cmd));
60 arg = rsi_sdio_set_cmd52_arg(1, 0, 0, address, byte);
61 io_cmd.opcode = SD_IO_RW_DIRECT;
62 io_cmd.arg = arg;
63 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
64
65 return mmc_wait_for_cmd(card->host, &io_cmd, 0);
66}
67
68/**
69 * rsi_cmd52readbyte() - This function issues cmd52 byte read onto the card.
70 * @card: Pointer to the mmc_card.
71 * @address: Address to read from.
72 * @byte: Variable to store read value.
73 *
74 * Return: Read status.
75 */
76static int rsi_cmd52readbyte(struct mmc_card *card,
77 u32 address,
78 u8 *byte)
79{
80 struct mmc_command io_cmd;
81 u32 arg;
82 int err;
83
84 memset(&io_cmd, 0, sizeof(io_cmd));
85 arg = rsi_sdio_set_cmd52_arg(0, 0, 0, address, 0);
86 io_cmd.opcode = SD_IO_RW_DIRECT;
87 io_cmd.arg = arg;
88 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
89
90 err = mmc_wait_for_cmd(card->host, &io_cmd, 0);
91 if ((!err) && (byte))
92 *byte = io_cmd.resp[0] & 0xFF;
93 return err;
94}
95
96/**
97 * rsi_issue_sdiocommand() - This function issues sdio commands.
98 * @func: Pointer to the sdio_func structure.
99 * @opcode: Opcode value.
100 * @arg: Arguments to pass.
101 * @flags: Flags which are set.
102 * @resp: Pointer to store response.
103 *
104 * Return: err: command status as 0 or -1.
105 */
106static int rsi_issue_sdiocommand(struct sdio_func *func,
107 u32 opcode,
108 u32 arg,
109 u32 flags,
110 u32 *resp)
111{
112 struct mmc_command cmd;
113 struct mmc_host *host;
114 int err;
115
116 host = func->card->host;
117
118 memset(&cmd, 0, sizeof(struct mmc_command));
119 cmd.opcode = opcode;
120 cmd.arg = arg;
121 cmd.flags = flags;
122 err = mmc_wait_for_cmd(host, &cmd, 3);
123
124 if ((!err) && (resp))
125 *resp = cmd.resp[0];
126
127 return err;
128}
129
130/**
131 * rsi_handle_interrupt() - This function is called upon the occurence
132 * of an interrupt.
133 * @function: Pointer to the sdio_func structure.
134 *
135 * Return: None.
136 */
137static void rsi_handle_interrupt(struct sdio_func *function)
138{
139 struct rsi_hw *adapter = sdio_get_drvdata(function);
140
141 sdio_release_host(function);
142 rsi_interrupt_handler(adapter);
143 sdio_claim_host(function);
144}
145
146/**
147 * rsi_reset_card() - This function resets and re-initializes the card.
148 * @pfunction: Pointer to the sdio_func structure.
149 *
150 * Return: None.
151 */
152static void rsi_reset_card(struct sdio_func *pfunction)
153{
154 int ret = 0;
155 int err;
156 struct mmc_card *card = pfunction->card;
157 struct mmc_host *host = card->host;
158 s32 bit = (fls(host->ocr_avail) - 1);
159 u8 cmd52_resp;
160 u32 clock, resp, i;
161 u16 rca;
162
163 /* Reset 9110 chip */
164 ret = rsi_cmd52writebyte(pfunction->card,
165 SDIO_CCCR_ABORT,
166 (1 << 3));
167
168 /* Card will not send any response as it is getting reset immediately
169 * Hence expect a timeout status from host controller
170 */
171 if (ret != -ETIMEDOUT)
172 rsi_dbg(ERR_ZONE, "%s: Reset failed : %d\n", __func__, ret);
173
174 /* Wait for few milli seconds to get rid of residue charges if any */
175 msleep(20);
176
177 /* Initialize the SDIO card */
178 host->ios.vdd = bit;
179 host->ios.chip_select = MMC_CS_DONTCARE;
180 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
181 host->ios.power_mode = MMC_POWER_UP;
182 host->ios.bus_width = MMC_BUS_WIDTH_1;
183 host->ios.timing = MMC_TIMING_LEGACY;
184 host->ops->set_ios(host, &host->ios);
185
186 /*
187 * This delay should be sufficient to allow the power supply
188 * to reach the minimum voltage.
189 */
190 msleep(20);
191
192 host->ios.clock = host->f_min;
193 host->ios.power_mode = MMC_POWER_ON;
194 host->ops->set_ios(host, &host->ios);
195
196 /*
197 * This delay must be at least 74 clock sizes, or 1 ms, or the
198 * time required to reach a stable voltage.
199 */
200 msleep(20);
201
202 /* Issue CMD0. Goto idle state */
203 host->ios.chip_select = MMC_CS_HIGH;
204 host->ops->set_ios(host, &host->ios);
205 msleep(20);
206 err = rsi_issue_sdiocommand(pfunction,
207 MMC_GO_IDLE_STATE,
208 0,
209 (MMC_RSP_NONE | MMC_CMD_BC),
210 NULL);
211 host->ios.chip_select = MMC_CS_DONTCARE;
212 host->ops->set_ios(host, &host->ios);
213 msleep(20);
214 host->use_spi_crc = 0;
215
216 if (err)
217 rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
218
219 if (!host->ocr_avail) {
220 /* Issue CMD5, arg = 0 */
221 err = rsi_issue_sdiocommand(pfunction,
222 SD_IO_SEND_OP_COND,
223 0,
224 (MMC_RSP_R4 | MMC_CMD_BCR),
225 &resp);
226 if (err)
227 rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
228 __func__, err);
229 host->ocr_avail = resp;
230 }
231
232 /* Issue CMD5, arg = ocr. Wait till card is ready */
233 for (i = 0; i < 100; i++) {
234 err = rsi_issue_sdiocommand(pfunction,
235 SD_IO_SEND_OP_COND,
236 host->ocr_avail,
237 (MMC_RSP_R4 | MMC_CMD_BCR),
238 &resp);
239 if (err) {
240 rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
241 __func__, err);
242 break;
243 }
244
245 if (resp & MMC_CARD_BUSY)
246 break;
247 msleep(20);
248 }
249
250 if ((i == 100) || (err)) {
251 rsi_dbg(ERR_ZONE, "%s: card in not ready : %d %d\n",
252 __func__, i, err);
253 return;
254 }
255
256 /* Issue CMD3, get RCA */
257 err = rsi_issue_sdiocommand(pfunction,
258 SD_SEND_RELATIVE_ADDR,
259 0,
260 (MMC_RSP_R6 | MMC_CMD_BCR),
261 &resp);
262 if (err) {
263 rsi_dbg(ERR_ZONE, "%s: CMD3 failed : %d\n", __func__, err);
264 return;
265 }
266 rca = resp >> 16;
267 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
268 host->ops->set_ios(host, &host->ios);
269
270 /* Issue CMD7, select card */
271 err = rsi_issue_sdiocommand(pfunction,
272 MMC_SELECT_CARD,
273 (rca << 16),
274 (MMC_RSP_R1 | MMC_CMD_AC),
275 NULL);
276 if (err) {
277 rsi_dbg(ERR_ZONE, "%s: CMD7 failed : %d\n", __func__, err);
278 return;
279 }
280
281 /* Enable high speed */
282 if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
283 rsi_dbg(ERR_ZONE, "%s: Set high speed mode\n", __func__);
284 err = rsi_cmd52readbyte(card, SDIO_CCCR_SPEED, &cmd52_resp);
285 if (err) {
286 rsi_dbg(ERR_ZONE, "%s: CCCR speed reg read failed: %d\n",
287 __func__, err);
288 card->state &= ~MMC_STATE_HIGHSPEED;
289 } else {
290 err = rsi_cmd52writebyte(card,
291 SDIO_CCCR_SPEED,
292 (cmd52_resp | SDIO_SPEED_EHS));
293 if (err) {
294 rsi_dbg(ERR_ZONE,
295 "%s: CCR speed regwrite failed %d\n",
296 __func__, err);
297 return;
298 }
299 mmc_card_set_highspeed(card);
300 host->ios.timing = MMC_TIMING_SD_HS;
301 host->ops->set_ios(host, &host->ios);
302 }
303 }
304
305 /* Set clock */
306 if (mmc_card_highspeed(card))
307 clock = 50000000;
308 else
309 clock = card->cis.max_dtr;
310
311 if (clock > host->f_max)
312 clock = host->f_max;
313
314 host->ios.clock = clock;
315 host->ops->set_ios(host, &host->ios);
316
317 if (card->host->caps & MMC_CAP_4_BIT_DATA) {
318 /* CMD52: Set bus width & disable card detect resistor */
319 err = rsi_cmd52writebyte(card,
320 SDIO_CCCR_IF,
321 (SDIO_BUS_CD_DISABLE |
322 SDIO_BUS_WIDTH_4BIT));
323 if (err) {
324 rsi_dbg(ERR_ZONE, "%s: Set bus mode failed : %d\n",
325 __func__, err);
326 return;
327 }
328 host->ios.bus_width = MMC_BUS_WIDTH_4;
329 host->ops->set_ios(host, &host->ios);
330 }
331}
332
333/**
334 * rsi_setclock() - This function sets the clock frequency.
335 * @adapter: Pointer to the adapter structure.
336 * @freq: Clock frequency.
337 *
338 * Return: None.
339 */
340static void rsi_setclock(struct rsi_hw *adapter, u32 freq)
341{
342 struct rsi_91x_sdiodev *dev =
343 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
344 struct mmc_host *host = dev->pfunction->card->host;
345 u32 clock;
346
347 clock = freq * 1000;
348 if (clock > host->f_max)
349 clock = host->f_max;
350 host->ios.clock = clock;
351 host->ops->set_ios(host, &host->ios);
352}
353
354/**
355 * rsi_setblocklength() - This function sets the host block length.
356 * @adapter: Pointer to the adapter structure.
357 * @length: Block length to be set.
358 *
359 * Return: status: 0 on success, -1 on failure.
360 */
361static int rsi_setblocklength(struct rsi_hw *adapter, u32 length)
362{
363 struct rsi_91x_sdiodev *dev =
364 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
365 int status;
366 rsi_dbg(INIT_ZONE, "%s: Setting the block length\n", __func__);
367
368 status = sdio_set_block_size(dev->pfunction, length);
369 dev->pfunction->max_blksize = 256;
370
371 rsi_dbg(INFO_ZONE,
372 "%s: Operational blk length is %d\n", __func__, length);
373 return status;
374}
375
376/**
377 * rsi_setupcard() - This function queries and sets the card's features.
378 * @adapter: Pointer to the adapter structure.
379 *
380 * Return: status: 0 on success, -1 on failure.
381 */
382static int rsi_setupcard(struct rsi_hw *adapter)
383{
384 struct rsi_91x_sdiodev *dev =
385 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
386 int status = 0;
387
388 rsi_setclock(adapter, 50000);
389
390 dev->tx_blk_size = 256;
391 status = rsi_setblocklength(adapter, dev->tx_blk_size);
392 if (status)
393 rsi_dbg(ERR_ZONE,
394 "%s: Unable to set block length\n", __func__);
395 return status;
396}
397
398/**
399 * rsi_sdio_read_register() - This function reads one byte of information
400 * from a register.
401 * @adapter: Pointer to the adapter structure.
402 * @addr: Address of the register.
403 * @data: Pointer to the data that stores the data read.
404 *
405 * Return: 0 on success, -1 on failure.
406 */
407int rsi_sdio_read_register(struct rsi_hw *adapter,
408 u32 addr,
409 u8 *data)
410{
411 struct rsi_91x_sdiodev *dev =
412 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
413 u8 fun_num = 0;
414 int status;
415
416 sdio_claim_host(dev->pfunction);
417
418 if (fun_num == 0)
419 *data = sdio_f0_readb(dev->pfunction, addr, &status);
420 else
421 *data = sdio_readb(dev->pfunction, addr, &status);
422
423 sdio_release_host(dev->pfunction);
424
425 return status;
426}
427
428/**
429 * rsi_sdio_write_register() - This function writes one byte of information
430 * into a register.
431 * @adapter: Pointer to the adapter structure.
432 * @function: Function Number.
433 * @addr: Address of the register.
434 * @data: Pointer to the data tha has to be written.
435 *
436 * Return: 0 on success, -1 on failure.
437 */
438int rsi_sdio_write_register(struct rsi_hw *adapter,
439 u8 function,
440 u32 addr,
441 u8 *data)
442{
443 struct rsi_91x_sdiodev *dev =
444 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
445 int status = 0;
446
447 sdio_claim_host(dev->pfunction);
448
449 if (function == 0)
450 sdio_f0_writeb(dev->pfunction, *data, addr, &status);
451 else
452 sdio_writeb(dev->pfunction, *data, addr, &status);
453
454 sdio_release_host(dev->pfunction);
455
456 return status;
457}
458
459/**
460 * rsi_sdio_ack_intr() - This function acks the interrupt received.
461 * @adapter: Pointer to the adapter structure.
462 * @int_bit: Interrupt bit to write into register.
463 *
464 * Return: None.
465 */
466void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit)
467{
468 int status;
469 status = rsi_sdio_write_register(adapter,
470 1,
471 (SDIO_FUN1_INTR_CLR_REG |
472 RSI_SD_REQUEST_MASTER),
473 &int_bit);
474 if (status)
475 rsi_dbg(ERR_ZONE, "%s: unable to send ack\n", __func__);
476}
477
478
479
480/**
481 * rsi_sdio_read_register_multiple() - This function read multiple bytes of
482 * information from the SD card.
483 * @adapter: Pointer to the adapter structure.
484 * @addr: Address of the register.
485 * @count: Number of multiple bytes to be read.
486 * @data: Pointer to the read data.
487 *
488 * Return: 0 on success, -1 on failure.
489 */
490static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter,
491 u32 addr,
492 u32 count,
493 u8 *data)
494{
495 struct rsi_91x_sdiodev *dev =
496 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
497 u32 status;
498
499 sdio_claim_host(dev->pfunction);
500
501 status = sdio_readsb(dev->pfunction, data, addr, count);
502
503 sdio_release_host(dev->pfunction);
504
505 if (status != 0)
506 rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__);
507 return status;
508}
509
/**
 * rsi_sdio_write_register_multiple() - This function writes multiple bytes of
 *					information to the SD card.
 * @adapter: Pointer to the adapter structure.
 * @addr: Address of the register.
 * @data: Pointer to the data that has to be written.
 * @count: Number of multiple bytes to be written.
 *
 * Return: 0 on success, -1 on failure.
 */
int rsi_sdio_write_register_multiple(struct rsi_hw *adapter,
				     u32 addr,
				     u8 *data,
				     u32 count)
{
	struct rsi_91x_sdiodev *dev =
		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
	int status;

	/* write_fail acts as a small state machine: 0 = healthy,
	 * 1 = one failure seen (one retry allowed), >1 = writes disabled.
	 * NOTE(review): the disabled path returns 0, silently dropping
	 * the data while reporting success to the caller — confirm that
	 * callers tolerate this. */
	if (dev->write_fail > 1) {
		rsi_dbg(ERR_ZONE, "%s: Stopping card writes\n", __func__);
		return 0;
	} else if (dev->write_fail == 1) {
		/*
		 * Assuming it is a CRC failure, we want to allow another
		 * card write
		 */
		rsi_dbg(ERR_ZONE, "%s: Continue card writes\n", __func__);
		dev->write_fail++;
	}

	sdio_claim_host(dev->pfunction);

	status = sdio_writesb(dev->pfunction, addr, data, count);

	sdio_release_host(dev->pfunction);

	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n",
			__func__, status);
		dev->write_fail = 2;
	} else {
		/* Remember the last successfully written frame descriptor. */
		memcpy(dev->prev_desc, data, FRAME_DESC_SZ);
	}
	return status;
}
556
557/**
558 * rsi_sdio_host_intf_write_pkt() - This function writes the packet to device.
559 * @adapter: Pointer to the adapter structure.
560 * @pkt: Pointer to the data to be written on to the device.
561 * @len: length of the data to be written on to the device.
562 *
563 * Return: 0 on success, -1 on failure.
564 */
565static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
566 u8 *pkt,
567 u32 len)
568{
569 struct rsi_91x_sdiodev *dev =
570 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
571 u32 block_size = dev->tx_blk_size;
572 u32 num_blocks, address, length;
573 u32 queueno;
574 int status;
575
576 queueno = ((pkt[1] >> 4) & 0xf);
577
578 num_blocks = len / block_size;
579
580 if (len % block_size)
581 num_blocks++;
582
583 address = (num_blocks * block_size | (queueno << 12));
584 length = num_blocks * block_size;
585
586 status = rsi_sdio_write_register_multiple(adapter,
587 address,
588 (u8 *)pkt,
589 length);
590 if (status)
591 rsi_dbg(ERR_ZONE, "%s: Unable to write onto the card: %d\n",
592 __func__, status);
593 rsi_dbg(DATA_TX_ZONE, "%s: Successfully written onto card\n", __func__);
594 return status;
595}
596
/**
 * rsi_sdio_host_intf_read_pkt() - This function reads the packet
 *				   from the device.
 * @adapter: Pointer to the adapter data structure.
 * @pkt: Pointer to the packet data to be read from the the device.
 * @length: Length of the data to be read from the device.
 *
 * Return: 0 on success, -1 on failure.
 */
int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter,
				u8 *pkt,
				u32 length)
{
	int status = -EINVAL;

	/* Reject zero-length reads up front. */
	if (!length) {
		rsi_dbg(ERR_ZONE, "%s: Pkt size is zero\n", __func__);
		return status;
	}

	/* NOTE(review): `length` is passed both as the address and as
	 * the byte count — presumably the device's RX read address is
	 * defined to equal the transfer size; confirm against the
	 * hardware protocol spec. */
	status = rsi_sdio_read_register_multiple(adapter,
						 length,
						 length, /*num of bytes*/
						 (u8 *)pkt);

	if (status)
		rsi_dbg(ERR_ZONE, "%s: Failed to read frame: %d\n", __func__,
			status);
	return status;
}
627
/**
 * rsi_init_sdio_interface() - This function does init specific to SDIO.
 * @adapter: Pointer to the adapter data structure.
 * @pfunction: Pointer to the sdio function the card was probed on.
 *
 * Allocates the per-device SDIO state, enables the SDIO function, then
 * configures the card (clock, block length) and its slave registers.
 * The host is held claimed across the whole configuration sequence.
 *
 * Return: 0 on success, -1 on failure.
 */

static int rsi_init_sdio_interface(struct rsi_hw *adapter,
				   struct sdio_func *pfunction)
{
	struct rsi_91x_sdiodev *rsi_91x_dev;
	int status = -ENOMEM;

	rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
	if (!rsi_91x_dev)
		return status;

	adapter->rsi_dev = rsi_91x_dev;

	sdio_claim_host(pfunction);

	pfunction->enable_timeout = 100;
	status = sdio_enable_func(pfunction);
	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Failed to enable interface\n", __func__);
		sdio_release_host(pfunction);
		/* NOTE(review): rsi_91x_dev is not freed on the failure
		 * paths in this function — presumably rsi_91x_deinit()
		 * in the caller releases adapter->rsi_dev; verify, else
		 * this leaks. */
		return status;
	}

	rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);

	rsi_91x_dev->pfunction = pfunction;
	adapter->device = &pfunction->dev;

	sdio_set_drvdata(pfunction, adapter);

	status = rsi_setupcard(adapter);
	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Failed to setup card\n", __func__);
		goto fail;
	}

	rsi_dbg(INIT_ZONE, "%s: Setup card succesfully\n", __func__);

	status = rsi_init_sdio_slave_regs(adapter);
	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Failed to init slave regs\n", __func__);
		goto fail;
	}
	sdio_release_host(pfunction);

	/* Wire the SDIO transport callbacks into the common layer. */
	adapter->host_intf_write_pkt = rsi_sdio_host_intf_write_pkt;
	adapter->host_intf_read_pkt = rsi_sdio_host_intf_read_pkt;
	adapter->determine_event_timeout = rsi_sdio_determine_event_timeout;
	adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register;

#ifdef CONFIG_RSI_DEBUGFS
	adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
#endif
	return status;
fail:
	sdio_disable_func(pfunction);
	sdio_release_host(pfunction);
	return status;
}
695
/**
 * rsi_probe() - This function is called by kernel when the driver provided
 *		 Vendor and device IDs are matched. All the initialization
 *		 work is done here.
 * @pfunction: Pointer to the sdio_func structure.
 * @id: Pointer to sdio_device_id structure.
 *
 * NOTE(review): failure paths return 1 rather than a negative errno;
 * the SDIO core only checks for non-zero, but -errno would be the
 * conventional kernel return — confirm before changing.
 *
 * Return: 0 on success, 1 on failure.
 */
static int rsi_probe(struct sdio_func *pfunction,
		     const struct sdio_device_id *id)
{
	struct rsi_hw *adapter;

	rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);

	/* Allocate and initialize the common (bus-independent) state. */
	adapter = rsi_91x_init();
	if (!adapter) {
		rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
			__func__);
		return 1;
	}

	if (rsi_init_sdio_interface(adapter, pfunction)) {
		rsi_dbg(ERR_ZONE, "%s: Failed to init sdio interface\n",
			__func__);
		goto fail;
	}

	/* Load firmware and bring the device out of reset. */
	if (rsi_sdio_device_init(adapter->priv)) {
		rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
		sdio_claim_host(pfunction);
		sdio_disable_func(pfunction);
		sdio_release_host(pfunction);
		goto fail;
	}

	sdio_claim_host(pfunction);
	if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
		rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__);
		sdio_release_host(pfunction);
		goto fail;
	}

	sdio_release_host(pfunction);
	rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__);

	return 0;
fail:
	rsi_91x_deinit(adapter);
	rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
	return 1;
}
749
750/**
751 * rsi_disconnect() - This function performs the reverse of the probe function.
752 * @pfunction: Pointer to the sdio_func structure.
753 *
754 * Return: void.
755 */
756static void rsi_disconnect(struct sdio_func *pfunction)
757{
758 struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
759 struct rsi_91x_sdiodev *dev =
760 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
761
762 if (!adapter)
763 return;
764
765 dev->write_fail = 2;
766 rsi_mac80211_detach(adapter);
767
768 sdio_claim_host(pfunction);
769 sdio_release_irq(pfunction);
770 sdio_disable_func(pfunction);
771 rsi_91x_deinit(adapter);
772 /* Resetting to take care of the case, where-in driver is re-loaded */
773 rsi_reset_card(pfunction);
774 sdio_release_host(pfunction);
775}
776
#ifdef CONFIG_PM
/* System suspend hook — not yet implemented; returning -ENOSYS makes
 * suspend attempts fail visibly instead of silently succeeding. */
static int rsi_suspend(struct device *dev)
{
	/* Not yet implemented */
	return -ENOSYS;
}

/* System resume hook — not yet implemented. */
static int rsi_resume(struct device *dev)
{
	/* Not yet implemented */
	return -ENOSYS;
}

/* Power-management callbacks wired into rsi_driver below. */
static const struct dev_pm_ops rsi_pm_ops = {
	.suspend = rsi_suspend,
	.resume = rsi_resume,
};
#endif
795
/* SDIO vendor/device ID pairs this driver binds to; the list is
 * zero-terminated as required by the SDIO core. */
static const struct sdio_device_id rsi_dev_table[] = {
	{ SDIO_DEVICE(0x303, 0x100) },
	{ SDIO_DEVICE(0x041B, 0x0301) },
	{ SDIO_DEVICE(0x041B, 0x0201) },
	{ SDIO_DEVICE(0x041B, 0x9330) },
	{ /* Blank */},
};
803
/* SDIO bus driver registration: probe/remove callbacks, the ID table
 * above, and (when CONFIG_PM is set) the PM callbacks. */
static struct sdio_driver rsi_driver = {
	.name       = "RSI-SDIO WLAN",
	.probe      = rsi_probe,
	.remove     = rsi_disconnect,
	.id_table   = rsi_dev_table,
#ifdef CONFIG_PM
	.drv = {
		.pm = &rsi_pm_ops,
	}
#endif
};
815
816/**
817 * rsi_module_init() - This function registers the sdio module.
818 * @void: Void.
819 *
820 * Return: 0 on success.
821 */
822static int rsi_module_init(void)
823{
824 sdio_register_driver(&rsi_driver);
825 rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
826 return 0;
827}
828
829/**
830 * rsi_module_exit() - This function unregisters the sdio module.
831 * @void: Void.
832 *
833 * Return: None.
834 */
835static void rsi_module_exit(void)
836{
837 sdio_unregister_driver(&rsi_driver);
838 rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
839}
840
module_init(rsi_module_init);
module_exit(rsi_module_exit);

/* Module metadata: author, firmware dependency, and the device table
 * exported for automatic module loading by udev/modprobe. */
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
MODULE_SUPPORTED_DEVICE("RSI-91x");
MODULE_DEVICE_TABLE(sdio, rsi_dev_table);
MODULE_FIRMWARE(FIRMWARE_RSI9113);
MODULE_VERSION("0.1");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
new file mode 100644
index 000000000000..f1cb99cafed8
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -0,0 +1,566 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 *
16 */
17
18#include <linux/firmware.h>
19#include "rsi_sdio.h"
20#include "rsi_common.h"
21
/**
 * rsi_sdio_master_access_msword() - This function sets the AHB master access
 *				     MS word in the SDIO slave registers.
 * @adapter: Pointer to the adapter structure.
 * @ms_word: ms word need to be initialized.
 *
 * Return: status: 0 on success, -1 on failure.
 */
static int rsi_sdio_master_access_msword(struct rsi_hw *adapter,
					 u16 ms_word)
{
	u8 byte;
	u8 function = 0;
	int status = 0;

	/* NOTE(review): the low byte goes to SDIO_MASTER_ACCESS_MSBYTE
	 * and the high byte to ..._LSBYTE — presumably the register
	 * names follow the device's own convention; confirm against the
	 * slave register map before "fixing" the apparent swap. */
	byte = (u8)(ms_word & 0x00FF);

	rsi_dbg(INIT_ZONE,
		"%s: MASTER_ACCESS_MSBYTE:0x%x\n", __func__, byte);

	status = rsi_sdio_write_register(adapter,
					 function,
					 SDIO_MASTER_ACCESS_MSBYTE,
					 &byte);
	if (status) {
		rsi_dbg(ERR_ZONE,
			"%s: fail to access MASTER_ACCESS_MSBYTE\n",
			__func__);
		return -1;
	}

	byte = (u8)(ms_word >> 8);

	rsi_dbg(INIT_ZONE, "%s:MASTER_ACCESS_LSBYTE:0x%x\n", __func__, byte);
	status = rsi_sdio_write_register(adapter,
					 function,
					 SDIO_MASTER_ACCESS_LSBYTE,
					 &byte);
	return status;
}
62
/**
 * rsi_copy_to_card() - This function includes the actual funtionality of
 *			copying the TA firmware to the card.Basically this
 *			function includes opening the TA file,reading the
 *			TA file and writing their values in blocks of data.
 * @common: Pointer to the driver private structure.
 * @fw: Pointer to the firmware value to be written.
 * @len: length of firmware file.
 * @num_blocks: Number of blocks to be written to the card.
 *
 * Return: 0 on success and -1 on failure.
 */
static int rsi_copy_to_card(struct rsi_common *common,
			    const u8 *fw,
			    u32 len,
			    u32 num_blocks)
{
	struct rsi_hw *adapter = common->priv;
	struct rsi_91x_sdiodev *dev =
		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
	u32 indx, ii;
	u32 block_size = dev->tx_blk_size;
	u32 lsb_address;
	/* Value/register pairs written at the end, in order, to bring
	 * the TA processor out of reset. */
	__le32 data[] = { TA_HOLD_THREAD_VALUE, TA_SOFT_RST_CLR,
			  TA_PC_ZERO, TA_RELEASE_THREAD_VALUE };
	u32 address[] = { TA_HOLD_THREAD_REG, TA_SOFT_RESET_REG,
			  TA_TH0_PC_REG, TA_RELEASE_THREAD_REG };
	u32 base_address;
	u16 msb_address;

	base_address = TA_LOAD_ADDRESS;
	msb_address = base_address >> 16;

	/* Stream the image one block at a time.  Each CMD53 carries only
	 * the 16-bit LSB of the target address; the MS word is written
	 * separately whenever the address crosses a 64 KB boundary. */
	for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
		lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
		if (rsi_sdio_write_register_multiple(adapter,
						     lsb_address,
						     (u8 *)(fw + indx),
						     block_size)) {
			rsi_dbg(ERR_ZONE,
				"%s: Unable to load %s blk\n", __func__,
				FIRMWARE_RSI9113);
			return -1;
		}
		rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
		base_address += block_size;
		if ((base_address >> 16) != msb_address) {
			msb_address += 1;
			if (rsi_sdio_master_access_msword(adapter,
							  msb_address)) {
				rsi_dbg(ERR_ZONE,
					"%s: Unable to set ms word reg\n",
					__func__);
				return -1;
			}
		}
	}

	/* Write the tail that did not fill a whole block. */
	if (len % block_size) {
		lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
		if (rsi_sdio_write_register_multiple(adapter,
						     lsb_address,
						     (u8 *)(fw + indx),
						     len % block_size)) {
			rsi_dbg(ERR_ZONE,
				"%s: Unable to load f/w\n", __func__);
			return -1;
		}
	}
	rsi_dbg(INIT_ZONE,
		"%s: Succesfully loaded TA instructions\n", __func__);

	/* Switch the MS word back to the TA register space before the
	 * reset-release sequence below. */
	if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
		rsi_dbg(ERR_ZONE,
			"%s: Unable to set ms word to common reg\n",
			__func__);
		return -1;
	}

	for (ii = 0; ii < ARRAY_SIZE(data); ii++) {
		/* Bringing TA out of reset */
		if (rsi_sdio_write_register_multiple(adapter,
						     (address[ii] |
						      RSI_SD_REQUEST_MASTER),
						     (u8 *)&data[ii],
						     4)) {
			rsi_dbg(ERR_ZONE,
				"%s: Unable to hold TA threads\n", __func__);
			return -1;
		}
	}

	rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
	return 0;
}
158
159/**
160 * rsi_load_ta_instructions() - This function includes the actual funtionality
161 * of loading the TA firmware.This function also
162 * includes opening the TA file,reading the TA
163 * file and writing their value in blocks of data.
164 * @common: Pointer to the driver private structure.
165 *
166 * Return: status: 0 on success, -1 on failure.
167 */
168static int rsi_load_ta_instructions(struct rsi_common *common)
169{
170 struct rsi_hw *adapter = common->priv;
171 struct rsi_91x_sdiodev *dev =
172 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
173 u32 len;
174 u32 num_blocks;
175 const u8 *fw;
176 const struct firmware *fw_entry = NULL;
177 u32 block_size = dev->tx_blk_size;
178 int status = 0;
179 u32 base_address;
180 u16 msb_address;
181
182 if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
183 rsi_dbg(ERR_ZONE,
184 "%s: Unable to set ms word to common reg\n",
185 __func__);
186 return -1;
187 }
188 base_address = TA_LOAD_ADDRESS;
189 msb_address = (base_address >> 16);
190
191 if (rsi_sdio_master_access_msword(adapter, msb_address)) {
192 rsi_dbg(ERR_ZONE,
193 "%s: Unable to set ms word reg\n", __func__);
194 return -1;
195 }
196
197 status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
198 if (status < 0) {
199 rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
200 __func__, FIRMWARE_RSI9113);
201 return status;
202 }
203
204 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
205 len = fw_entry->size;
206
207 if (len % 4)
208 len += (4 - (len % 4));
209
210 num_blocks = (len / block_size);
211
212 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
213 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
214
215 status = rsi_copy_to_card(common, fw, len, num_blocks);
216 release_firmware(fw_entry);
217 return status;
218}
219
/**
 * rsi_process_pkt() - This Function reads rx_blocks register and figures out
 *		       the size of the rx pkt.
 * @common: Pointer to the driver private structure.
 *
 * Return: 0 on success, -1 on failure.
 */
static int rsi_process_pkt(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	u8 num_blks = 0;
	u32 rcv_pkt_len = 0;
	int status = 0;

	/* The device reports how many RX blocks are pending. */
	status = rsi_sdio_read_register(adapter,
					SDIO_RX_NUM_BLOCKS_REG,
					&num_blks);

	if (status) {
		rsi_dbg(ERR_ZONE,
			"%s: Failed to read pkt length from the card:\n",
			__func__);
		return status;
	}
	/* NOTE(review): 256 here matches the tx_blk_size set in
	 * rsi_setupcard(); presumably the RX block size is the same —
	 * confirm, and consider a shared constant. */
	rcv_pkt_len = (num_blks * 256);

	common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL);
	if (!common->rx_data_pkt) {
		rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
			__func__);
		return -1;
	}

	status = rsi_sdio_host_intf_read_pkt(adapter,
					     common->rx_data_pkt,
					     rcv_pkt_len);
	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n",
			__func__);
		goto fail;
	}

	/* Hand the raw buffer to the common layer for parsing. */
	status = rsi_read_pkt(common, rcv_pkt_len);
	kfree(common->rx_data_pkt);
	return status;

fail:
	kfree(common->rx_data_pkt);
	return -1;
}
270
/**
 * rsi_init_sdio_slave_regs() - This function does the actual initialization
 *				of SDBUS slave registers.
 * @adapter: Pointer to the adapter structure.
 *
 * Return: status: 0 on success, -1 on failure.
 */
int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
{
	struct rsi_91x_sdiodev *dev =
		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
	u8 function = 0;
	u8 byte;
	int status = 0;

	/* Optional per-device read-delay tuning. */
	if (dev->next_read_delay) {
		byte = dev->next_read_delay;
		status = rsi_sdio_write_register(adapter,
						 function,
						 SDIO_NXT_RD_DELAY2,
						 &byte);
		if (status) {
			rsi_dbg(ERR_ZONE,
				"%s: Failed to write SDIO_NXT_RD_DELAY2\n",
				__func__);
			return -1;
		}
	}

	if (dev->sdio_high_speed_enable) {
		rsi_dbg(INIT_ZONE, "%s: Enabling SDIO High speed\n", __func__);
		/* NOTE(review): 0x3 presumably sets the high-speed enable
		 * bits in SDIO_REG_HIGH_SPEED — confirm against the
		 * slave register map. */
		byte = 0x3;

		status = rsi_sdio_write_register(adapter,
						 function,
						 SDIO_REG_HIGH_SPEED,
						 &byte);
		if (status) {
			rsi_dbg(ERR_ZONE,
				"%s: Failed to enable SDIO high speed\n",
				__func__);
			return -1;
		}
	}

	/* This tells SDIO FIFO when to start read to host */
	rsi_dbg(INIT_ZONE, "%s: Initialzing SDIO read start level\n", __func__);
	byte = 0x24;

	status = rsi_sdio_write_register(adapter,
					 function,
					 SDIO_READ_START_LVL,
					 &byte);
	if (status) {
		rsi_dbg(ERR_ZONE,
			"%s: Failed to write SDIO_READ_START_LVL\n", __func__);
		return -1;
	}

	rsi_dbg(INIT_ZONE, "%s: Initialzing FIFO ctrl registers\n", __func__);
	/* NOTE(review): (128 - 32) and 32 look like FIFO watermarks in
	 * some unit (bytes or words) — document the unit once known. */
	byte = (128 - 32);

	status = rsi_sdio_write_register(adapter,
					 function,
					 SDIO_READ_FIFO_CTL,
					 &byte);
	if (status) {
		rsi_dbg(ERR_ZONE,
			"%s: Failed to write SDIO_READ_FIFO_CTL\n", __func__);
		return -1;
	}

	byte = 32;
	status = rsi_sdio_write_register(adapter,
					 function,
					 SDIO_WRITE_FIFO_CTL,
					 &byte);
	if (status) {
		rsi_dbg(ERR_ZONE,
			"%s: Failed to write SDIO_WRITE_FIFO_CTL\n", __func__);
		return -1;
	}

	return 0;
}
356
357/**
358 * rsi_interrupt_handler() - This function read and process SDIO interrupts.
359 * @adapter: Pointer to the adapter structure.
360 *
361 * Return: None.
362 */
363void rsi_interrupt_handler(struct rsi_hw *adapter)
364{
365 struct rsi_common *common = adapter->priv;
366 struct rsi_91x_sdiodev *dev =
367 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
368 int status;
369 enum sdio_interrupt_type isr_type;
370 u8 isr_status = 0;
371 u8 fw_status = 0;
372
373 dev->rx_info.sdio_int_counter++;
374
375 do {
376 mutex_lock(&common->tx_rxlock);
377 status = rsi_sdio_read_register(common->priv,
378 RSI_FN1_INT_REGISTER,
379 &isr_status);
380 if (status) {
381 rsi_dbg(ERR_ZONE,
382 "%s: Failed to Read Intr Status Register\n",
383 __func__);
384 mutex_unlock(&common->tx_rxlock);
385 return;
386 }
387
388 if (isr_status == 0) {
389 rsi_set_event(&common->tx_thread.event);
390 dev->rx_info.sdio_intr_status_zero++;
391 mutex_unlock(&common->tx_rxlock);
392 return;
393 }
394
395 rsi_dbg(ISR_ZONE, "%s: Intr_status = %x %d %d\n",
396 __func__, isr_status, (1 << MSDU_PKT_PENDING),
397 (1 << FW_ASSERT_IND));
398
399 do {
400 RSI_GET_SDIO_INTERRUPT_TYPE(isr_status, isr_type);
401
402 switch (isr_type) {
403 case BUFFER_AVAILABLE:
404 dev->rx_info.watch_bufferfull_count = 0;
405 dev->rx_info.buffer_full = false;
406 dev->rx_info.mgmt_buffer_full = false;
407 rsi_sdio_ack_intr(common->priv,
408 (1 << PKT_BUFF_AVAILABLE));
409 rsi_set_event((&common->tx_thread.event));
410 rsi_dbg(ISR_ZONE,
411 "%s: ==> BUFFER_AVILABLE <==\n",
412 __func__);
413 dev->rx_info.buf_avilable_counter++;
414 break;
415
416 case FIRMWARE_ASSERT_IND:
417 rsi_dbg(ERR_ZONE,
418 "%s: ==> FIRMWARE Assert <==\n",
419 __func__);
420 status = rsi_sdio_read_register(common->priv,
421 SDIO_FW_STATUS_REG,
422 &fw_status);
423 if (status) {
424 rsi_dbg(ERR_ZONE,
425 "%s: Failed to read f/w reg\n",
426 __func__);
427 } else {
428 rsi_dbg(ERR_ZONE,
429 "%s: Firmware Status is 0x%x\n",
430 __func__ , fw_status);
431 rsi_sdio_ack_intr(common->priv,
432 (1 << FW_ASSERT_IND));
433 }
434
435 common->fsm_state = FSM_CARD_NOT_READY;
436 break;
437
438 case MSDU_PACKET_PENDING:
439 rsi_dbg(ISR_ZONE, "Pkt pending interrupt\n");
440 dev->rx_info.total_sdio_msdu_pending_intr++;
441
442 status = rsi_process_pkt(common);
443 if (status) {
444 rsi_dbg(ERR_ZONE,
445 "%s: Failed to read pkt\n",
446 __func__);
447 mutex_unlock(&common->tx_rxlock);
448 return;
449 }
450 break;
451 default:
452 rsi_sdio_ack_intr(common->priv, isr_status);
453 dev->rx_info.total_sdio_unknown_intr++;
454 isr_status = 0;
455 rsi_dbg(ISR_ZONE,
456 "Unknown Interrupt %x\n",
457 isr_status);
458 break;
459 }
460 isr_status ^= BIT(isr_type - 1);
461 } while (isr_status);
462 mutex_unlock(&common->tx_rxlock);
463 } while (1);
464}
465
466/**
467 * rsi_device_init() - This Function Initializes The HAL.
468 * @common: Pointer to the driver private structure.
469 *
470 * Return: 0 on success, -1 on failure.
471 */
472int rsi_sdio_device_init(struct rsi_common *common)
473{
474 if (rsi_load_ta_instructions(common))
475 return -1;
476
477 if (rsi_sdio_master_access_msword(common->priv, MISC_CFG_BASE_ADDR)) {
478 rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n",
479 __func__);
480 return -1;
481 }
482 rsi_dbg(INIT_ZONE,
483 "%s: Setting ms word to 0x41050000\n", __func__);
484
485 return 0;
486}
487
/**
 * rsi_sdio_read_buffer_status_register() - This function is used to the read
 *					    buffer status register and set
 *					    relevant fields in
 *					    rsi_91x_sdiodev struct.
 * @adapter: Pointer to the driver hw structure.
 * @q_num: The Q number whose status is to be found.
 *
 * Return: status: -1 on failure or else queue full/stop is indicated.
 */
int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num)
{
	struct rsi_common *common = adapter->priv;
	struct rsi_91x_sdiodev *dev =
		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
	u8 buf_status = 0;
	int status = 0;

	status = rsi_sdio_read_register(common->priv,
					RSI_DEVICE_BUFFER_STATUS_REGISTER,
					&buf_status);

	if (status) {
		rsi_dbg(ERR_ZONE,
			"%s: Failed to read status register\n", __func__);
		return -1;
	}

	/* Mirror each status bit into rx_info, counting only the first
	 * transition into the "full" state for each counter. */
	if (buf_status & (BIT(PKT_MGMT_BUFF_FULL))) {
		if (!dev->rx_info.mgmt_buffer_full)
			dev->rx_info.mgmt_buf_full_counter++;
		dev->rx_info.mgmt_buffer_full = true;
	} else {
		dev->rx_info.mgmt_buffer_full = false;
	}

	if (buf_status & (BIT(PKT_BUFF_FULL))) {
		if (!dev->rx_info.buffer_full)
			dev->rx_info.buf_full_counter++;
		dev->rx_info.buffer_full = true;
	} else {
		dev->rx_info.buffer_full = false;
	}

	if (buf_status & (BIT(PKT_BUFF_SEMI_FULL))) {
		if (!dev->rx_info.semi_buffer_full)
			dev->rx_info.buf_semi_full_counter++;
		dev->rx_info.semi_buffer_full = true;
	} else {
		dev->rx_info.semi_buffer_full = false;
	}

	/* Management queue is gated by its own full flag; everything
	 * else only by the general buffer-full flag. */
	if ((q_num == MGMT_SOFT_Q) && (dev->rx_info.mgmt_buffer_full))
		return QUEUE_FULL;

	if (dev->rx_info.buffer_full)
		return QUEUE_FULL;

	return QUEUE_NOT_FULL;
}
548
549/**
550 * rsi_sdio_determine_event_timeout() - This Function determines the event
551 * timeout duration.
552 * @adapter: Pointer to the adapter structure.
553 *
554 * Return: timeout duration is returned.
555 */
556int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter)
557{
558 struct rsi_91x_sdiodev *dev =
559 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
560
561 /* Once buffer full is seen, event timeout to occur every 2 msecs */
562 if (dev->rx_info.buffer_full)
563 return 2;
564
565 return EVENT_WAIT_FOREVER;
566}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
new file mode 100644
index 000000000000..bb1bf96670eb
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -0,0 +1,575 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 *
16 */
17
18#include <linux/module.h>
19#include "rsi_usb.h"
20
21/**
22 * rsi_usb_card_write() - This function writes to the USB Card.
23 * @adapter: Pointer to the adapter structure.
24 * @buf: Pointer to the buffer from where the data has to be taken.
25 * @len: Length to be written.
26 * @endpoint: Type of endpoint.
27 *
28 * Return: status: 0 on success, -1 on failure.
29 */
30static int rsi_usb_card_write(struct rsi_hw *adapter,
31 void *buf,
32 u16 len,
33 u8 endpoint)
34{
35 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
36 int status;
37 s32 transfer;
38
39 status = usb_bulk_msg(dev->usbdev,
40 usb_sndbulkpipe(dev->usbdev,
41 dev->bulkout_endpoint_addr[endpoint - 1]),
42 buf,
43 len,
44 &transfer,
45 HZ * 5);
46
47 if (status < 0) {
48 rsi_dbg(ERR_ZONE,
49 "Card write failed with error code :%10d\n", status);
50 dev->write_fail = 1;
51 }
52 return status;
53}
54
55/**
56 * rsi_write_multiple() - This function writes multiple bytes of information
57 * to the USB card.
58 * @adapter: Pointer to the adapter structure.
59 * @addr: Address of the register.
60 * @data: Pointer to the data that has to be written.
61 * @count: Number of multiple bytes to be written.
62 *
63 * Return: 0 on success, -1 on failure.
64 */
65static int rsi_write_multiple(struct rsi_hw *adapter,
66 u8 endpoint,
67 u8 *data,
68 u32 count)
69{
70 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
71 u8 *seg = dev->tx_buffer;
72
73 if (dev->write_fail)
74 return 0;
75
76 if (endpoint == MGMT_EP) {
77 memset(seg, 0, RSI_USB_TX_HEAD_ROOM);
78 memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count);
79 } else {
80 seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM);
81 }
82
83 return rsi_usb_card_write(adapter,
84 seg,
85 count + RSI_USB_TX_HEAD_ROOM,
86 endpoint);
87}
88
89/**
90 * rsi_find_bulk_in_and_out_endpoints() - This function initializes the bulk
91 * endpoints to the device.
92 * @interface: Pointer to the USB interface structure.
93 * @adapter: Pointer to the adapter structure.
94 *
95 * Return: ret_val: 0 on success, -ENOMEM on failure.
96 */
97static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
98 struct rsi_hw *adapter)
99{
100 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
101 struct usb_host_interface *iface_desc;
102 struct usb_endpoint_descriptor *endpoint;
103 __le16 buffer_size;
104 int ii, bep_found = 0;
105
106 iface_desc = &(interface->altsetting[0]);
107
108 for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
109 endpoint = &(iface_desc->endpoint[ii].desc);
110
111 if ((!(dev->bulkin_endpoint_addr)) &&
112 (endpoint->bEndpointAddress & USB_DIR_IN) &&
113 ((endpoint->bmAttributes &
114 USB_ENDPOINT_XFERTYPE_MASK) ==
115 USB_ENDPOINT_XFER_BULK)) {
116 buffer_size = endpoint->wMaxPacketSize;
117 dev->bulkin_size = buffer_size;
118 dev->bulkin_endpoint_addr =
119 endpoint->bEndpointAddress;
120 }
121
122 if (!dev->bulkout_endpoint_addr[bep_found] &&
123 !(endpoint->bEndpointAddress & USB_DIR_IN) &&
124 ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
125 USB_ENDPOINT_XFER_BULK)) {
126 dev->bulkout_endpoint_addr[bep_found] =
127 endpoint->bEndpointAddress;
128 buffer_size = endpoint->wMaxPacketSize;
129 dev->bulkout_size[bep_found] = buffer_size;
130 bep_found++;
131 }
132
133 if (bep_found >= MAX_BULK_EP)
134 break;
135 }
136
137 if (!(dev->bulkin_endpoint_addr) &&
138 (dev->bulkout_endpoint_addr[0]))
139 return -EINVAL;
140
141 return 0;
142}
143
144/* rsi_usb_reg_read() - This function reads data from given register address.
145 * @usbdev: Pointer to the usb_device structure.
146 * @reg: Address of the register to be read.
147 * @value: Value to be read.
148 * @len: length of data to be read.
149 *
150 * Return: status: 0 on success, -1 on failure.
151 */
152static int rsi_usb_reg_read(struct usb_device *usbdev,
153 u32 reg,
154 u16 *value,
155 u16 len)
156{
157 u8 temp_buf[4];
158 int status = 0;
159
160 status = usb_control_msg(usbdev,
161 usb_rcvctrlpipe(usbdev, 0),
162 USB_VENDOR_REGISTER_READ,
163 USB_TYPE_VENDOR,
164 ((reg & 0xffff0000) >> 16), (reg & 0xffff),
165 (void *)temp_buf,
166 len,
167 HZ * 5);
168
169 *value = (temp_buf[0] | (temp_buf[1] << 8));
170 if (status < 0) {
171 rsi_dbg(ERR_ZONE,
172 "%s: Reg read failed with error code :%d\n",
173 __func__, status);
174 }
175 return status;
176}
177
178/**
179 * rsi_usb_reg_write() - This function writes the given data into the given
180 * register address.
181 * @usbdev: Pointer to the usb_device structure.
182 * @reg: Address of the register.
183 * @value: Value to write.
184 * @len: Length of data to be written.
185 *
186 * Return: status: 0 on success, -1 on failure.
187 */
188static int rsi_usb_reg_write(struct usb_device *usbdev,
189 u32 reg,
190 u16 value,
191 u16 len)
192{
193 u8 usb_reg_buf[4];
194 int status = 0;
195
196 usb_reg_buf[0] = (value & 0x00ff);
197 usb_reg_buf[1] = (value & 0xff00) >> 8;
198 usb_reg_buf[2] = 0x0;
199 usb_reg_buf[3] = 0x0;
200
201 status = usb_control_msg(usbdev,
202 usb_sndctrlpipe(usbdev, 0),
203 USB_VENDOR_REGISTER_WRITE,
204 USB_TYPE_VENDOR,
205 ((reg & 0xffff0000) >> 16),
206 (reg & 0xffff),
207 (void *)usb_reg_buf,
208 len,
209 HZ * 5);
210 if (status < 0) {
211 rsi_dbg(ERR_ZONE,
212 "%s: Reg write failed with error code :%d\n",
213 __func__, status);
214 }
215 return status;
216}
217
218/**
219 * rsi_rx_done_handler() - This function is called when a packet is received
220 * from USB stack. This is callback to recieve done.
221 * @urb: Received URB.
222 *
223 * Return: None.
224 */
225static void rsi_rx_done_handler(struct urb *urb)
226{
227 struct rsi_hw *adapter = urb->context;
228 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
229
230 if (urb->status)
231 return;
232
233 rsi_set_event(&dev->rx_thread.event);
234}
235
236/**
237 * rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
238 * @adapter: Pointer to the adapter structure.
239 *
240 * Return: 0 on success, -1 on failure.
241 */
242static int rsi_rx_urb_submit(struct rsi_hw *adapter)
243{
244 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
245 struct urb *urb = dev->rx_usb_urb[0];
246 int status;
247
248 usb_fill_bulk_urb(urb,
249 dev->usbdev,
250 usb_rcvbulkpipe(dev->usbdev,
251 dev->bulkin_endpoint_addr),
252 urb->transfer_buffer,
253 3000,
254 rsi_rx_done_handler,
255 adapter);
256
257 status = usb_submit_urb(urb, GFP_KERNEL);
258 if (status)
259 rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
260
261 return status;
262}
263
264/**
265 * rsi_usb_write_register_multiple() - This function writes multiple bytes of
266 * information to multiple registers.
267 * @adapter: Pointer to the adapter structure.
268 * @addr: Address of the register.
269 * @data: Pointer to the data that has to be written.
270 * @count: Number of multiple bytes to be written on to the registers.
271 *
272 * Return: status: 0 on success, -1 on failure.
273 */
274int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
275 u32 addr,
276 u8 *data,
277 u32 count)
278{
279 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
280 u8 *buf;
281 u8 transfer;
282 int status = 0;
283
284 buf = kzalloc(4096, GFP_KERNEL);
285 if (!buf)
286 return -ENOMEM;
287
288 while (count) {
289 transfer = min_t(int, count, 4096);
290 memcpy(buf, data, transfer);
291 status = usb_control_msg(dev->usbdev,
292 usb_sndctrlpipe(dev->usbdev, 0),
293 USB_VENDOR_REGISTER_WRITE,
294 USB_TYPE_VENDOR,
295 ((addr & 0xffff0000) >> 16),
296 (addr & 0xffff),
297 (void *)buf,
298 transfer,
299 HZ * 5);
300 if (status < 0) {
301 rsi_dbg(ERR_ZONE,
302 "Reg write failed with error code :%d\n",
303 status);
304 } else {
305 count -= transfer;
306 data += transfer;
307 addr += transfer;
308 }
309 }
310
311 kfree(buf);
312 return 0;
313}
314
315/**
316 *rsi_usb_host_intf_write_pkt() - This function writes the packet to the
317 * USB card.
318 * @adapter: Pointer to the adapter structure.
319 * @pkt: Pointer to the data to be written on to the card.
320 * @len: Length of the data to be written on to the card.
321 *
322 * Return: 0 on success, -1 on failure.
323 */
324static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
325 u8 *pkt,
326 u32 len)
327{
328 u32 queueno = ((pkt[1] >> 4) & 0xf);
329 u8 endpoint;
330
331 endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP);
332
333 return rsi_write_multiple(adapter,
334 endpoint,
335 (u8 *)pkt,
336 len);
337}
338
339/**
340 * rsi_deinit_usb_interface() - This function deinitializes the usb interface.
341 * @adapter: Pointer to the adapter structure.
342 *
343 * Return: None.
344 */
345static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
346{
347 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
348
349 rsi_kill_thread(&dev->rx_thread);
350 kfree(adapter->priv->rx_data_pkt);
351 kfree(dev->tx_buffer);
352}
353
354/**
355 * rsi_init_usb_interface() - This function initializes the usb interface.
356 * @adapter: Pointer to the adapter structure.
357 * @pfunction: Pointer to USB interface structure.
358 *
359 * Return: 0 on success, -1 on failure.
360 */
361static int rsi_init_usb_interface(struct rsi_hw *adapter,
362 struct usb_interface *pfunction)
363{
364 struct rsi_91x_usbdev *rsi_dev;
365 struct rsi_common *common = adapter->priv;
366 int status;
367
368 rsi_dev = kzalloc(sizeof(*rsi_dev), GFP_KERNEL);
369 if (!rsi_dev)
370 return -ENOMEM;
371
372 adapter->rsi_dev = rsi_dev;
373 rsi_dev->usbdev = interface_to_usbdev(pfunction);
374
375 if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter))
376 return -EINVAL;
377
378 adapter->device = &pfunction->dev;
379 usb_set_intfdata(pfunction, adapter);
380
381 common->rx_data_pkt = kmalloc(2048, GFP_KERNEL);
382 if (!common->rx_data_pkt) {
383 rsi_dbg(ERR_ZONE, "%s: Failed to allocate memory\n",
384 __func__);
385 return -ENOMEM;
386 }
387
388 rsi_dev->tx_buffer = kmalloc(2048, GFP_ATOMIC);
389 rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL);
390 rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
391 rsi_dev->tx_blk_size = 252;
392
393 /* Initializing function callbacks */
394 adapter->rx_urb_submit = rsi_rx_urb_submit;
395 adapter->host_intf_write_pkt = rsi_usb_host_intf_write_pkt;
396 adapter->check_hw_queue_status = rsi_usb_check_queue_status;
397 adapter->determine_event_timeout = rsi_usb_event_timeout;
398
399 rsi_init_event(&rsi_dev->rx_thread.event);
400 status = rsi_create_kthread(common, &rsi_dev->rx_thread,
401 rsi_usb_rx_thread, "RX-Thread");
402 if (status) {
403 rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
404 goto fail;
405 }
406
407#ifdef CONFIG_RSI_DEBUGFS
408 /* In USB, one less than the MAX_DEBUGFS_ENTRIES entries is required */
409 adapter->num_debugfs_entries = (MAX_DEBUGFS_ENTRIES - 1);
410#endif
411
412 rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
413 return 0;
414
415fail:
416 kfree(rsi_dev->tx_buffer);
417 kfree(common->rx_data_pkt);
418 return status;
419}
420
421/**
422 * rsi_probe() - This function is called by kernel when the driver provided
423 * Vendor and device IDs are matched. All the initialization
424 * work is done here.
425 * @pfunction: Pointer to the USB interface structure.
426 * @id: Pointer to the usb_device_id structure.
427 *
428 * Return: 0 on success, -1 on failure.
429 */
430static int rsi_probe(struct usb_interface *pfunction,
431 const struct usb_device_id *id)
432{
433 struct rsi_hw *adapter;
434 struct rsi_91x_usbdev *dev;
435 u16 fw_status;
436
437 rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
438
439 adapter = rsi_91x_init();
440 if (!adapter) {
441 rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
442 __func__);
443 return 1;
444 }
445
446 if (rsi_init_usb_interface(adapter, pfunction)) {
447 rsi_dbg(ERR_ZONE, "%s: Failed to init usb interface\n",
448 __func__);
449 goto err;
450 }
451
452 rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
453
454 dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
455
456 if (rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2) < 0)
457 goto err1;
458 else
459 fw_status &= 1;
460
461 if (!fw_status) {
462 if (rsi_usb_device_init(adapter->priv)) {
463 rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
464 __func__);
465 goto err1;
466 }
467
468 if (rsi_usb_reg_write(dev->usbdev,
469 USB_INTERNAL_REG_1,
470 RSI_USB_READY_MAGIC_NUM, 1) < 0)
471 goto err1;
472 rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
473 }
474
475 if (rsi_rx_urb_submit(adapter))
476 goto err1;
477
478 return 0;
479err1:
480 rsi_deinit_usb_interface(adapter);
481err:
482 rsi_91x_deinit(adapter);
483 rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
484 return 1;
485}
486
487/**
488 * rsi_disconnect() - This function performs the reverse of the probe function,
489 * it deintialize the driver structure.
490 * @pfunction: Pointer to the USB interface structure.
491 *
492 * Return: None.
493 */
494static void rsi_disconnect(struct usb_interface *pfunction)
495{
496 struct rsi_hw *adapter = usb_get_intfdata(pfunction);
497
498 if (!adapter)
499 return;
500
501 rsi_mac80211_detach(adapter);
502 rsi_deinit_usb_interface(adapter);
503 rsi_91x_deinit(adapter);
504
505 rsi_dbg(INFO_ZONE, "%s: Deinitialization completed\n", __func__);
506}
507
#ifdef CONFIG_PM
/* USB power-management hooks.  Both are stubs that report -ENOSYS
 * until suspend/resume support is implemented for this device.
 */
static int rsi_suspend(struct usb_interface *intf, pm_message_t message)
{
	/* Not yet implemented */
	return -ENOSYS;
}

static int rsi_resume(struct usb_interface *intf)
{
	/* Not yet implemented */
	return -ENOSYS;
}
#endif
521
/* USB (vendor, product) IDs served by this driver; the blank entry
 * terminates the table.
 * NOTE(review): presumably Redpine Signals device IDs — confirm
 * against hardware documentation.
 */
static const struct usb_device_id rsi_dev_table[] = {
	{ USB_DEVICE(0x0303, 0x0100) },
	{ USB_DEVICE(0x041B, 0x0301) },
	{ USB_DEVICE(0x041B, 0x0201) },
	{ USB_DEVICE(0x041B, 0x9330) },
	{ /* Blank */},
};
529
/* USB driver glue: ties the probe/disconnect callbacks and the device
 * ID table (plus optional PM hooks) to the USB core.
 */
static struct usb_driver rsi_driver = {
	.name       = "RSI-USB WLAN",
	.probe      = rsi_probe,
	.disconnect = rsi_disconnect,
	.id_table   = rsi_dev_table,
#ifdef CONFIG_PM
	.suspend    = rsi_suspend,
	.resume     = rsi_resume,
#endif
};
540
541/**
542 * rsi_module_init() - This function registers the client driver.
543 * @void: Void.
544 *
545 * Return: 0 on success.
546 */
547static int rsi_module_init(void)
548{
549 usb_register(&rsi_driver);
550 rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
551 return 0;
552}
553
/**
 * rsi_module_exit() - This function unregisters the client driver.
 * @void: Void.
 *
 * Return: None.
 */
static void rsi_module_exit(void)
{
	/* Unbinds all devices and removes the driver from the USB core */
	usb_deregister(&rsi_driver);
	rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
}
565
module_init(rsi_module_init);
module_exit(rsi_module_exit);

/* Module metadata: device table enables automatic module loading on
 * hotplug; MODULE_FIRMWARE declares the firmware image fetched at
 * device init.
 */
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("Common USB layer for RSI drivers");
MODULE_SUPPORTED_DEVICE("RSI-91x");
MODULE_DEVICE_TABLE(usb, rsi_dev_table);
MODULE_FIRMWARE(FIRMWARE_RSI9113);
MODULE_VERSION("0.1");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
new file mode 100644
index 000000000000..1106ce76707e
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -0,0 +1,177 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 *
16 */
17
18#include <linux/firmware.h>
19#include "rsi_usb.h"
20
/**
 * rsi_copy_to_card() - This function includes the actual functionality of
 *			copying the TA firmware to the card. Basically this
 *			function includes opening the TA file, reading the TA
 *			file and writing their values in blocks of data.
 * @common: Pointer to the driver private structure.
 * @fw: Pointer to the firmware value to be written.
 * @len: length of firmware file.
 * @num_blocks: Number of blocks to be written to the card.
 *
 * Return: 0 on success and -EIO on failure.
 */
static int rsi_copy_to_card(struct rsi_common *common,
			    const u8 *fw,
			    u32 len,
			    u32 num_blocks)
{
	struct rsi_hw *adapter = common->priv;
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	u32 indx, ii;
	u32 block_size = dev->tx_blk_size;
	u32 lsb_address;
	u32 base_address;

	base_address = TA_LOAD_ADDRESS;

	/* Write full blocks, advancing both the source offset (indx) and
	 * the device address (base_address) by block_size each pass.
	 */
	for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
		lsb_address = base_address;
		if (rsi_usb_write_register_multiple(adapter,
						    lsb_address,
						    (u8 *)(fw + indx),
						    block_size)) {
			rsi_dbg(ERR_ZONE,
				"%s: Unable to load %s blk\n", __func__,
				FIRMWARE_RSI9113);
			return -EIO;
		}
		rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
		base_address += block_size;
	}

	/* Flush any partial trailing block */
	if (len % block_size) {
		lsb_address = base_address;
		if (rsi_usb_write_register_multiple(adapter,
						    lsb_address,
						    (u8 *)(fw + indx),
						    len % block_size)) {
			rsi_dbg(ERR_ZONE,
				"%s: Unable to load %s blk\n", __func__,
				FIRMWARE_RSI9113);
			return -EIO;
		}
	}
	rsi_dbg(INIT_ZONE,
		"%s: Succesfully loaded %s instructions\n", __func__,
		FIRMWARE_RSI9113);

	rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
	return 0;
}
81
82/**
83 * rsi_usb_rx_thread() - This is a kernel thread to receive the packets from
84 * the USB device.
85 * @common: Pointer to the driver private structure.
86 *
87 * Return: None.
88 */
89void rsi_usb_rx_thread(struct rsi_common *common)
90{
91 struct rsi_hw *adapter = common->priv;
92 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
93 int status;
94
95 do {
96 rsi_wait_event(&dev->rx_thread.event, EVENT_WAIT_FOREVER);
97
98 if (atomic_read(&dev->rx_thread.thread_done))
99 goto out;
100
101 mutex_lock(&common->tx_rxlock);
102 status = rsi_read_pkt(common, 0);
103 if (status) {
104 rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__);
105 mutex_unlock(&common->tx_rxlock);
106 return;
107 }
108 mutex_unlock(&common->tx_rxlock);
109 rsi_reset_event(&dev->rx_thread.event);
110 if (adapter->rx_urb_submit(adapter)) {
111 rsi_dbg(ERR_ZONE,
112 "%s: Failed in urb submission", __func__);
113 return;
114 }
115 } while (1);
116
117out:
118 rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
119 complete_and_exit(&dev->rx_thread.completion, 0);
120}
121
122
123/**
124 * rsi_load_ta_instructions() - This function includes the actual funtionality
125 * of loading the TA firmware.This function also
126 * includes opening the TA file,reading the TA
127 * file and writing their value in blocks of data.
128 * @common: Pointer to the driver private structure.
129 *
130 * Return: status: 0 on success, -1 on failure.
131 */
132static int rsi_load_ta_instructions(struct rsi_common *common)
133{
134 struct rsi_hw *adapter = common->priv;
135 struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
136 const struct firmware *fw_entry = NULL;
137 u32 block_size = dev->tx_blk_size;
138 const u8 *fw;
139 u32 num_blocks, len;
140 int status = 0;
141
142 status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
143 if (status < 0) {
144 rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
145 __func__, FIRMWARE_RSI9113);
146 return status;
147 }
148
149 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
150 len = fw_entry->size;
151
152 if (len % 4)
153 len += (4 - (len % 4));
154
155 num_blocks = (len / block_size);
156
157 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
158 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
159
160 status = rsi_copy_to_card(common, fw, len, num_blocks);
161 release_firmware(fw_entry);
162 return status;
163}
164
165/**
166 * rsi_device_init() - This Function Initializes The HAL.
167 * @common: Pointer to the driver private structure.
168 *
169 * Return: 0 on success, -1 on failure.
170 */
171int rsi_usb_device_init(struct rsi_common *common)
172{
173 if (rsi_load_ta_instructions(common))
174 return -EIO;
175
176 return 0;
177 }
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
new file mode 100644
index 000000000000..5e2721f7909c
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -0,0 +1,126 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef __RSI_BOOTPARAMS_HEADER_H__
18#define __RSI_BOOTPARAMS_HEADER_H__
19
20#define CRYSTAL_GOOD_TIME BIT(0)
21#define BOOTUP_MODE_INFO BIT(1)
22#define WIFI_TAPLL_CONFIGS BIT(5)
23#define WIFI_PLL960_CONFIGS BIT(6)
24#define WIFI_AFEPLL_CONFIGS BIT(7)
25#define WIFI_SWITCH_CLK_CONFIGS BIT(8)
26
27#define TA_PLL_M_VAL_20 8
28#define TA_PLL_N_VAL_20 1
29#define TA_PLL_P_VAL_20 4
30
31#define PLL960_M_VAL_20 0x14
32#define PLL960_N_VAL_20 0
33#define PLL960_P_VAL_20 5
34
35#define UMAC_CLK_40MHZ 40
36
37#define TA_PLL_M_VAL_40 46
38#define TA_PLL_N_VAL_40 3
39#define TA_PLL_P_VAL_40 3
40
41#define PLL960_M_VAL_40 0x14
42#define PLL960_N_VAL_40 0
43#define PLL960_P_VAL_40 5
44
45#define UMAC_CLK_20BW \
46 (((TA_PLL_M_VAL_20 + 1) * 40) / \
47 ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1)))
48#define VALID_20 \
49 (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS)
50#define UMAC_CLK_40BW \
51 (((TA_PLL_M_VAL_40 + 1) * 40) / \
52 ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1)))
53#define VALID_40 \
54 (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS | \
55 WIFI_TAPLL_CONFIGS | CRYSTAL_GOOD_TIME | BOOTUP_MODE_INFO)
56
/* PLL register images sent to the device at boot.  All fields are
 * little-endian (__le16) as consumed by the firmware.
 */

/* structure to store configs related to TAPLL programming */
struct tapll_info {
	__le16 pll_reg_1;
	__le16 pll_reg_2;
} __packed;

/* structure to store configs related to PLL960 programming */
struct pll960_info {
	__le16 pll_reg_1;
	__le16 pll_reg_2;
	__le16 pll_reg_3;
} __packed;

/* structure to store configs related to AFEPLL programming */
struct afepll_info {
	__le16 pll_reg;
} __packed;

/* structure to store configs related to pll configs */
struct pll_config {
	struct tapll_info tapll_info_g;
	struct pll960_info pll960_info_g;
	struct afepll_info afepll_info_g;
} __packed;
81
/* structure to store configs related to UMAC clk programming.
 * Wire format shared with firmware; all fields little-endian.
 */
struct switch_clk {
	__le16 switch_clk_info;
	/* If switch_bbp_lmac_clk_reg is set then this value will be programmed
	 * into reg
	 */
	__le16 bbp_lmac_clk_reg_val;
	/* if switch_umac_clk is set then this value will be programmed */
	__le16 umac_clock_reg_config;
	/* if switch_qspi_clk is set then this value will be programmed */
	__le16 qspi_uart_clock_reg_config;
} __packed;

/* Per-clock-domain settings: PLL images plus clock-switch words */
struct device_clk_info {
	struct pll_config pll_config_g;
	struct switch_clk switch_clk_g;
} __packed;
99
/* Boot-time parameter block sent to the device.  Wire format shared
 * with firmware: packed, little-endian fields.
 */
struct bootup_params {
	__le16 magic_number;
	__le16 crystal_good_time;
	/* Bitmask (CRYSTAL_GOOD_TIME..WIFI_SWITCH_CLK_CONFIGS) saying which
	 * sections below are valid
	 */
	__le32 valid;
	__le32 reserved_for_valids;
	__le16 bootup_mode_info;
	/* configuration used for digital loop back */
	__le16 digital_loop_back_params;
	__le16 rtls_timestamp_en;
	__le16 host_spi_intr_cfg;
	struct device_clk_info device_clk_info[3];
	/* ulp buckboost wait time */
	__le32 buckboost_wakeup_cnt;
	/* pmu wakeup wait time & WDT EN info */
	__le16 pmu_wakeup_wait;
	u8 shutdown_wait_time;
	/* Sleep clock source selection */
	u8 pmu_slp_clkout_sel;
	/* WDT programming values */
	__le32 wdt_prog_value;
	/* WDT soc reset delay */
	__le32 wdt_soc_rst_delay;
	/* dcdc modes configs */
	__le32 dcdc_operation_mode;
	__le32 soc_reset_wait_cnt;
} __packed;
126#endif
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
new file mode 100644
index 000000000000..f2f70784d4ad
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -0,0 +1,87 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef __RSI_COMMON_H__
18#define __RSI_COMMON_H__
19
20#include <linux/kthread.h>
21
22#define EVENT_WAIT_FOREVER 0
23#define TA_LOAD_ADDRESS 0x00
24#define FIRMWARE_RSI9113 "rsi_91x.fw"
25#define QUEUE_NOT_FULL 1
26#define QUEUE_FULL 0
27
/* Initialize an rsi_event: condition == 1 means "not signalled".
 * Always returns 0.
 */
static inline int rsi_init_event(struct rsi_event *pevent)
{
	atomic_set(&pevent->event_condition, 1);
	init_waitqueue_head(&pevent->event_queue);
	return 0;
}
34
35static inline int rsi_wait_event(struct rsi_event *event, u32 timeout)
36{
37 int status = 0;
38
39 if (!timeout)
40 status = wait_event_interruptible(event->event_queue,
41 (atomic_read(&event->event_condition) == 0));
42 else
43 status = wait_event_interruptible_timeout(event->event_queue,
44 (atomic_read(&event->event_condition) == 0),
45 timeout);
46 return status;
47}
48
/* Signal the event and wake any thread sleeping in rsi_wait_event() */
static inline void rsi_set_event(struct rsi_event *event)
{
	atomic_set(&event->event_condition, 0);
	wake_up_interruptible(&event->event_queue);
}
54
/* Re-arm the event so the next rsi_wait_event() blocks again */
static inline void rsi_reset_event(struct rsi_event *event)
{
	atomic_set(&event->event_condition, 1);
}
59
/* Start a driver worker thread running func_ptr(common).
 * Returns 0 on success or the PTR_ERR value from kthread_run().
 * NOTE(review): func_ptr is a bare void * handed to kthread_run();
 * callers must pass a function compatible with int (*)(void *) —
 * confirm each call site.
 */
static inline int rsi_create_kthread(struct rsi_common *common,
				     struct rsi_thread *thread,
				     void *func_ptr,
				     u8 *name)
{
	init_completion(&thread->completion);
	thread->task = kthread_run(func_ptr, common, name);
	if (IS_ERR(thread->task))
		return (int)PTR_ERR(thread->task);

	return 0;
}
72
/* Ask a worker thread to stop and wait for it to finish: set the
 * thread_done flag, wake the thread via its event, block until the
 * thread signals its completion, then reap it with kthread_stop().
 * The worker must always signal the completion on exit or this hangs.
 */
static inline int rsi_kill_thread(struct rsi_thread *handle)
{
	atomic_inc(&handle->thread_done);
	rsi_set_event(&handle->event);

	wait_for_completion(&handle->completion);
	return kthread_stop(handle->task);
}
81
82void rsi_mac80211_detach(struct rsi_hw *hw);
83u16 rsi_get_connected_channel(struct rsi_hw *adapter);
84struct rsi_hw *rsi_91x_init(void);
85void rsi_91x_deinit(struct rsi_hw *adapter);
86int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len);
87#endif
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
new file mode 100644
index 000000000000..580ad3b3f710
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -0,0 +1,48 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef __RSI_DEBUGFS_H__
18#define __RSI_DEBUGFS_H__
19
20#include "rsi_main.h"
21#include <linux/debugfs.h>
22
#ifndef CONFIG_RSI_DEBUGFS
/* Debugfs disabled: provide no-op stubs so callers need no #ifdefs */
static inline int rsi_init_dbgfs(struct rsi_hw *adapter)
{
	return 0;
}

static inline void rsi_remove_dbgfs(struct rsi_hw *adapter)
{
	return;
}
#else
/* Describes one debugfs file: its name, permissions and fops */
struct rsi_dbg_files {
	const char *name;
	umode_t perms;
	const struct file_operations fops;
};

/* Per-adapter debugfs state: the driver's subdirectory and the
 * dentries of the files created under it.
 */
struct rsi_debugfs {
	struct dentry *subdir;
	struct rsi_dbg_ops *dfs_get_ops;
	struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
};
int rsi_init_dbgfs(struct rsi_hw *adapter);
void rsi_remove_dbgfs(struct rsi_hw *adapter);
#endif
48#endif
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
new file mode 100644
index 000000000000..2cb73e7edb98
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -0,0 +1,218 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef __RSI_MAIN_H__
18#define __RSI_MAIN_H__
19
20#include <linux/string.h>
21#include <linux/skbuff.h>
22#include <net/mac80211.h>
23
24#define ERR_ZONE BIT(0) /* For Error Msgs */
25#define INFO_ZONE BIT(1) /* For General Status Msgs */
26#define INIT_ZONE BIT(2) /* For Driver Init Seq Msgs */
27#define MGMT_TX_ZONE BIT(3) /* For TX Mgmt Path Msgs */
28#define MGMT_RX_ZONE BIT(4) /* For RX Mgmt Path Msgs */
29#define DATA_TX_ZONE BIT(5) /* For TX Data Path Msgs */
30#define DATA_RX_ZONE BIT(6) /* For RX Data Path Msgs */
31#define FSM_ZONE BIT(7) /* For State Machine Msgs */
32#define ISR_ZONE BIT(8) /* For Interrupt Msgs */
33
34#define FSM_CARD_NOT_READY 0
35#define FSM_BOOT_PARAMS_SENT 1
36#define FSM_EEPROM_READ_MAC_ADDR 2
37#define FSM_RESET_MAC_SENT 3
38#define FSM_RADIO_CAPS_SENT 4
39#define FSM_BB_RF_PROG_SENT 5
40#define FSM_MAC_INIT_DONE 6
41
42extern u32 rsi_zone_enabled;
43extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
44
45#define RSI_MAX_VIFS 1
46#define NUM_EDCA_QUEUES 4
47#define IEEE80211_ADDR_LEN 6
48#define FRAME_DESC_SZ 16
49#define MIN_802_11_HDR_LEN 24
50
51#define DATA_QUEUE_WATER_MARK 400
52#define MIN_DATA_QUEUE_WATER_MARK 300
53#define MULTICAST_WATER_MARK 200
54#define MAC_80211_HDR_FRAME_CONTROL 0
55#define WME_NUM_AC 4
56#define NUM_SOFT_QUEUES 5
57#define MAX_HW_QUEUES 8
58#define INVALID_QUEUE 0xff
59#define MAX_CONTINUOUS_VO_PKTS 8
60#define MAX_CONTINUOUS_VI_PKTS 4
61
62/* Queue information */
63#define RSI_WIFI_MGMT_Q 0x4
64#define RSI_WIFI_DATA_Q 0x5
65#define IEEE80211_MGMT_FRAME 0x00
66#define IEEE80211_CTL_FRAME 0x04
67
68#define IEEE80211_QOS_TID 0x0f
69#define IEEE80211_NONQOS_TID 16
70
71#define MAX_DEBUGFS_ENTRIES 4
72
73#define TID_TO_WME_AC(_tid) ( \
74 ((_tid) == 0 || (_tid) == 3) ? BE_Q : \
75 ((_tid) < 3) ? BK_Q : \
76 ((_tid) < 6) ? VI_Q : \
77 VO_Q)
78
79#define WME_AC(_q) ( \
80 ((_q) == BK_Q) ? IEEE80211_AC_BK : \
81 ((_q) == BE_Q) ? IEEE80211_AC_BE : \
82 ((_q) == VI_Q) ? IEEE80211_AC_VI : \
83 IEEE80211_AC_VO)
84
85struct version_info {
86 u16 major;
87 u16 minor;
88 u16 release_num;
89 u16 patch_num;
90} __packed;
91
92struct skb_info {
93 s8 rssi;
94 u32 flags;
95 u16 channel;
96 s8 tid;
97 s8 sta_id;
98};
99
100enum edca_queue {
101 BK_Q,
102 BE_Q,
103 VI_Q,
104 VO_Q,
105 MGMT_SOFT_Q
106};
107
108struct security_info {
109 bool security_enable;
110 u32 ptk_cipher;
111 u32 gtk_cipher;
112};
113
114struct wmm_qinfo {
115 s32 weight;
116 s32 wme_params;
117 s32 pkt_contended;
118};
119
120struct transmit_q_stats {
121 u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 1];
122 u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 1];
123};
124
125struct vif_priv {
126 bool is_ht;
127 bool sgi;
128 u16 seq_start;
129};
130
131struct rsi_event {
132 atomic_t event_condition;
133 wait_queue_head_t event_queue;
134};
135
136struct rsi_thread {
137 void (*thread_function)(void *);
138 struct completion completion;
139 struct task_struct *task;
140 struct rsi_event event;
141 atomic_t thread_done;
142};
143
144struct rsi_hw;
145
146struct rsi_common {
147 struct rsi_hw *priv;
148 struct vif_priv vif_info[RSI_MAX_VIFS];
149
150 bool mgmt_q_block;
151 struct version_info driver_ver;
152 struct version_info fw_ver;
153
154 struct rsi_thread tx_thread;
155 struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1];
156 /* Mutex declaration */
157 struct mutex mutex;
158 /* Mutex used between tx/rx threads */
159 struct mutex tx_rxlock;
160 u8 endpoint;
161
162 /* Channel/band related */
163 u8 band;
164 u8 channel_width;
165
166 u16 rts_threshold;
167 u16 bitrate_mask[2];
168 u32 fixedrate_mask[2];
169
170 u8 rf_reset;
171 struct transmit_q_stats tx_stats;
172 struct security_info secinfo;
173 struct wmm_qinfo tx_qinfo[NUM_EDCA_QUEUES];
174 struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
175 u8 mac_addr[IEEE80211_ADDR_LEN];
176
177 /* state related */
178 u32 fsm_state;
179 bool init_done;
180 u8 bb_rf_prog_count;
181 bool iface_down;
182
183 /* Generic */
184 u8 channel;
185 u8 *rx_data_pkt;
186 u8 mac_id;
187 u8 radio_id;
188 u16 rate_pwr[20];
189 u16 min_rate;
190
191 /* WMM algo related */
192 u8 selected_qnum;
193 u32 pkt_cnt;
194 u8 min_weight;
195};
196
197struct rsi_hw {
198 struct rsi_common *priv;
199 struct ieee80211_hw *hw;
200 struct ieee80211_vif *vifs[RSI_MAX_VIFS];
201 struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
202 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
203
204 struct device *device;
205 u8 sc_nvifs;
206
207#ifdef CONFIG_RSI_DEBUGFS
208 struct rsi_debugfs *dfsentry;
209 u8 num_debugfs_entries;
210#endif
211 void *rsi_dev;
212 int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
213 int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
214 int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num);
215 int (*rx_urb_submit)(struct rsi_hw *adapter);
216 int (*determine_event_timeout)(struct rsi_hw *adapter);
217};
218#endif
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
new file mode 100644
index 000000000000..ac67c4ad63c2
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -0,0 +1,285 @@
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef __RSI_MGMT_H__
18#define __RSI_MGMT_H__
19
20#include <linux/sort.h>
21#include "rsi_boot_params.h"
22#include "rsi_main.h"
23
24#define MAX_MGMT_PKT_SIZE 512
25#define RSI_NEEDED_HEADROOM 80
26#define RSI_RCV_BUFFER_LEN 2000
27
28#define RSI_11B_MODE 0
29#define RSI_11G_MODE BIT(7)
30#define RETRY_COUNT 8
31#define RETRY_LONG 4
32#define RETRY_SHORT 7
33#define WMM_SHORT_SLOT_TIME 9
34#define SIFS_DURATION 16
35
36#define KEY_TYPE_CLEAR 0
37#define RSI_PAIRWISE_KEY 1
38#define RSI_GROUP_KEY 2
39
40/* EPPROM_READ_ADDRESS */
41#define WLAN_MAC_EEPROM_ADDR 40
42#define WLAN_MAC_MAGIC_WORD_LEN 0x01
43#define WLAN_HOST_MODE_LEN 0x04
44#define WLAN_FW_VERSION_LEN 0x08
45#define MAGIC_WORD 0x5A
46
47/* Receive Frame Types */
48#define TA_CONFIRM_TYPE 0x01
49#define RX_DOT11_MGMT 0x02
50#define TX_STATUS_IND 0x04
51#define PROBEREQ_CONFIRM 2
52#define CARD_READY_IND 0x00
53
54#define RSI_DELETE_PEER 0x0
55#define RSI_ADD_PEER 0x1
56#define START_AMPDU_AGGR 0x1
57#define STOP_AMPDU_AGGR 0x0
58#define INTERNAL_MGMT_PKT 0x99
59
60#define PUT_BBP_RESET 0
61#define BBP_REG_WRITE 0
62#define RF_RESET_ENABLE BIT(3)
63#define RATE_INFO_ENABLE BIT(0)
64#define RSI_BROADCAST_PKT BIT(9)
65
66#define UPPER_20_ENABLE (0x2 << 12)
67#define LOWER_20_ENABLE (0x4 << 12)
68#define FULL40M_ENABLE 0x6
69
70#define RSI_LMAC_CLOCK_80MHZ 0x1
71#define RSI_ENABLE_40MHZ (0x1 << 3)
72
73#define RX_BA_INDICATION 1
74#define RSI_TBL_SZ 40
75#define MAX_RETRIES 8
76
77#define STD_RATE_MCS7 0x07
78#define STD_RATE_MCS6 0x06
79#define STD_RATE_MCS5 0x05
80#define STD_RATE_MCS4 0x04
81#define STD_RATE_MCS3 0x03
82#define STD_RATE_MCS2 0x02
83#define STD_RATE_MCS1 0x01
84#define STD_RATE_MCS0 0x00
85#define STD_RATE_54 0x6c
86#define STD_RATE_48 0x60
87#define STD_RATE_36 0x48
88#define STD_RATE_24 0x30
89#define STD_RATE_18 0x24
90#define STD_RATE_12 0x18
91#define STD_RATE_11 0x16
92#define STD_RATE_09 0x12
93#define STD_RATE_06 0x0C
94#define STD_RATE_5_5 0x0B
95#define STD_RATE_02 0x04
96#define STD_RATE_01 0x02
97
98#define RSI_RF_TYPE 1
99#define RSI_RATE_00 0x00
100#define RSI_RATE_1 0x0
101#define RSI_RATE_2 0x2
102#define RSI_RATE_5_5 0x4
103#define RSI_RATE_11 0x6
104#define RSI_RATE_6 0x8b
105#define RSI_RATE_9 0x8f
106#define RSI_RATE_12 0x8a
107#define RSI_RATE_18 0x8e
108#define RSI_RATE_24 0x89
109#define RSI_RATE_36 0x8d
110#define RSI_RATE_48 0x88
111#define RSI_RATE_54 0x8c
112#define RSI_RATE_MCS0 0x100
113#define RSI_RATE_MCS1 0x101
114#define RSI_RATE_MCS2 0x102
115#define RSI_RATE_MCS3 0x103
116#define RSI_RATE_MCS4 0x104
117#define RSI_RATE_MCS5 0x105
118#define RSI_RATE_MCS6 0x106
119#define RSI_RATE_MCS7 0x107
120#define RSI_RATE_MCS7_SG 0x307
121
122#define BW_20MHZ 0
123#define BW_40MHZ 1
124
125#define RSI_SUPP_FILTERS (FIF_ALLMULTI | FIF_PROBE_REQ |\
126 FIF_BCN_PRBRESP_PROMISC)
127enum opmode {
128 STA_OPMODE = 1,
129 AP_OPMODE = 2
130};
131
132extern struct ieee80211_rate rsi_rates[12];
133extern const u16 rsi_mcsrates[8];
134
135enum sta_notify_events {
136 STA_CONNECTED = 0,
137 STA_DISCONNECTED,
138 STA_TX_ADDBA_DONE,
139 STA_TX_DELBA,
140 STA_RX_ADDBA_DONE,
141 STA_RX_DELBA
142};
143
144/* Send Frames Types */
145enum cmd_frame_type {
146 TX_DOT11_MGMT,
147 RESET_MAC_REQ,
148 RADIO_CAPABILITIES,
149 BB_PROG_VALUES_REQUEST,
150 RF_PROG_VALUES_REQUEST,
151 WAKEUP_SLEEP_REQUEST,
152 SCAN_REQUEST,
153 TSF_UPDATE,
154 PEER_NOTIFY,
155 BLOCK_UNBLOCK,
156 SET_KEY_REQ,
157 AUTO_RATE_IND,
158 BOOTUP_PARAMS_REQUEST,
159 VAP_CAPABILITIES,
160 EEPROM_READ_TYPE ,
161 EEPROM_WRITE,
162 GPIO_PIN_CONFIG ,
163 SET_RX_FILTER,
164 AMPDU_IND,
165 STATS_REQUEST_FRAME,
166 BB_BUF_PROG_VALUES_REQ,
167 BBP_PROG_IN_TA,
168 BG_SCAN_PARAMS,
169 BG_SCAN_PROBE_REQ,
170 CW_MODE_REQ,
171 PER_CMD_PKT
172};
173
174struct rsi_mac_frame {
175 __le16 desc_word[8];
176} __packed;
177
178struct rsi_boot_params {
179 __le16 desc_word[8];
180 struct bootup_params bootup_params;
181} __packed;
182
183struct rsi_peer_notify {
184 __le16 desc_word[8];
185 u8 mac_addr[6];
186 __le16 command;
187 __le16 mpdu_density;
188 __le16 reserved;
189 __le32 sta_flags;
190} __packed;
191
192struct rsi_vap_caps {
193 __le16 desc_word[8];
194 u8 mac_addr[6];
195 __le16 keep_alive_period;
196 u8 bssid[6];
197 __le16 reserved;
198 __le32 flags;
199 __le16 frag_threshold;
200 __le16 rts_threshold;
201 __le32 default_mgmt_rate;
202 __le32 default_ctrl_rate;
203 __le32 default_data_rate;
204 __le16 beacon_interval;
205 __le16 dtim_period;
206} __packed;
207
208struct rsi_set_key {
209 __le16 desc_word[8];
210 u8 key[4][32];
211 u8 tx_mic_key[8];
212 u8 rx_mic_key[8];
213} __packed;
214
215struct rsi_auto_rate {
216 __le16 desc_word[8];
217 __le16 failure_limit;
218 __le16 initial_boundary;
219 __le16 max_threshold_limt;
220 __le16 num_supported_rates;
221 __le16 aarf_rssi;
222 __le16 moderate_rate_inx;
223 __le16 collision_tolerance;
224 __le16 supported_rates[40];
225} __packed;
226
227struct qos_params {
228 __le16 cont_win_min_q;
229 __le16 cont_win_max_q;
230 __le16 aifsn_val_q;
231 __le16 txop_q;
232} __packed;
233
234struct rsi_radio_caps {
235 __le16 desc_word[8];
236 struct qos_params qos_params[MAX_HW_QUEUES];
237 u8 num_11n_rates;
238 u8 num_11ac_rates;
239 __le16 gcpd_per_rate[20];
240} __packed;
241
242static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
243{
244 return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
245}
246
247static inline u32 rsi_get_length(u8 *addr, u16 offset)
248{
249 return (le16_to_cpu(*(__le16 *)&addr[offset])) & 0x0fff;
250}
251
252static inline u8 rsi_get_extended_desc(u8 *addr, u16 offset)
253{
254 return le16_to_cpu(*((__le16 *)&addr[offset + 4])) & 0x00ff;
255}
256
257static inline u8 rsi_get_rssi(u8 *addr)
258{
259 return *(u8 *)(addr + FRAME_DESC_SZ);
260}
261
262static inline u8 rsi_get_channel(u8 *addr)
263{
264 return *(char *)(addr + 15);
265}
266
267int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg);
268int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode);
269int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
270 u16 ssn, u8 buf_size, u8 event);
271int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
272 u8 key_type, u8 key_id, u32 cipher);
273int rsi_set_channel(struct rsi_common *common, u16 chno);
274void rsi_inform_bss_status(struct rsi_common *common, u8 status,
275 const u8 *bssid, u8 qos_enable, u16 aid);
276void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
277int rsi_mac80211_attach(struct rsi_common *common);
278void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
279 int status);
280bool rsi_is_cipher_wep(struct rsi_common *common);
281void rsi_core_qos_processor(struct rsi_common *common);
282void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
283int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
284int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
285#endif
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
new file mode 100644
index 000000000000..df4b5e20e05f
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -0,0 +1,129 @@
1/**
2 * @section LICENSE
3 * Copyright (c) 2014 Redpine Signals Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19#ifndef __RSI_SDIO_INTF__
20#define __RSI_SDIO_INTF__
21
22#include <linux/mmc/card.h>
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/sdio_func.h>
26#include <linux/mmc/sdio.h>
27#include <linux/mmc/sd.h>
28#include <linux/mmc/sdio_ids.h>
29#include "rsi_main.h"
30
31enum sdio_interrupt_type {
32 BUFFER_FULL = 0x0,
33 BUFFER_AVAILABLE = 0x1,
34 FIRMWARE_ASSERT_IND = 0x3,
35 MSDU_PACKET_PENDING = 0x4,
36 UNKNOWN_INT = 0XE
37};
38
39/* Buffer status register related info */
40#define PKT_BUFF_SEMI_FULL 0
41#define PKT_BUFF_FULL 1
42#define PKT_MGMT_BUFF_FULL 2
43#define MSDU_PKT_PENDING 3
44/* Interrupt Bit Related Macros */
45#define PKT_BUFF_AVAILABLE 0
46#define FW_ASSERT_IND 2
47
48#define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3
49#define RSI_FN1_INT_REGISTER 0xf9
50#define RSI_SD_REQUEST_MASTER 0x10000
51
52/* FOR SD CARD ONLY */
53#define SDIO_RX_NUM_BLOCKS_REG 0x000F1
54#define SDIO_FW_STATUS_REG 0x000F2
55#define SDIO_NXT_RD_DELAY2 0x000F5
56#define SDIO_MASTER_ACCESS_MSBYTE 0x000FA
57#define SDIO_MASTER_ACCESS_LSBYTE 0x000FB
58#define SDIO_READ_START_LVL 0x000FC
59#define SDIO_READ_FIFO_CTL 0x000FD
60#define SDIO_WRITE_FIFO_CTL 0x000FE
61#define SDIO_FUN1_INTR_CLR_REG 0x0008
62#define SDIO_REG_HIGH_SPEED 0x0013
63
64#define RSI_GET_SDIO_INTERRUPT_TYPE(_I, TYPE) \
65 { \
66 TYPE = \
67 (_I & (1 << PKT_BUFF_AVAILABLE)) ? \
68 BUFFER_AVAILABLE : \
69 (_I & (1 << MSDU_PKT_PENDING)) ? \
70 MSDU_PACKET_PENDING : \
71 (_I & (1 << FW_ASSERT_IND)) ? \
72 FIRMWARE_ASSERT_IND : UNKNOWN_INT; \
73 }
74
75/* common registers in SDIO function1 */
76#define TA_SOFT_RESET_REG 0x0004
77#define TA_TH0_PC_REG 0x0400
78#define TA_HOLD_THREAD_REG 0x0844
79#define TA_RELEASE_THREAD_REG 0x0848
80
81#define TA_SOFT_RST_CLR 0
82#define TA_SOFT_RST_SET BIT(0)
83#define TA_PC_ZERO 0
84#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF)
85#define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
86#define TA_BASE_ADDR 0x2200
87#define MISC_CFG_BASE_ADDR 0x4150
88
89struct receive_info {
90 bool buffer_full;
91 bool semi_buffer_full;
92 bool mgmt_buffer_full;
93 u32 mgmt_buf_full_counter;
94 u32 buf_semi_full_counter;
95 u8 watch_bufferfull_count;
96 u32 sdio_intr_status_zero;
97 u32 sdio_int_counter;
98 u32 total_sdio_msdu_pending_intr;
99 u32 total_sdio_unknown_intr;
100 u32 buf_full_counter;
101 u32 buf_avilable_counter;
102};
103
104struct rsi_91x_sdiodev {
105 struct sdio_func *pfunction;
106 struct task_struct *in_sdio_litefi_irq;
107 struct receive_info rx_info;
108 u32 next_read_delay;
109 u32 sdio_high_speed_enable;
110 u8 sdio_clock_speed;
111 u32 cardcapability;
112 u8 prev_desc[16];
113 u32 tx_blk_size;
114 u8 write_fail;
115};
116
117void rsi_interrupt_handler(struct rsi_hw *adapter);
118int rsi_init_sdio_slave_regs(struct rsi_hw *adapter);
119int rsi_sdio_device_init(struct rsi_common *common);
120int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data);
121int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
122int rsi_sdio_write_register(struct rsi_hw *adapter, u8 function,
123 u32 addr, u8 *data);
124int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr,
125 u8 *data, u32 count);
126void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit);
127int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter);
128int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num);
129#endif
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
new file mode 100644
index 000000000000..ebea0c411ead
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -0,0 +1,68 @@
1/**
2 * @section LICENSE
3 * Copyright (c) 2014 Redpine Signals Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef __RSI_USB_INTF__
19#define __RSI_USB_INTF__
20
21#include <linux/usb.h>
22#include "rsi_main.h"
23#include "rsi_common.h"
24
25#define USB_INTERNAL_REG_1 0x25000
26#define RSI_USB_READY_MAGIC_NUM 0xab
27#define FW_STATUS_REG 0x41050012
28
29#define USB_VENDOR_REGISTER_READ 0x15
30#define USB_VENDOR_REGISTER_WRITE 0x16
31#define RSI_USB_TX_HEAD_ROOM 128
32
33#define MAX_RX_URBS 1
34#define MAX_BULK_EP 8
35#define MGMT_EP 1
36#define DATA_EP 2
37
38struct rsi_91x_usbdev {
39 struct rsi_thread rx_thread;
40 u8 endpoint;
41 struct usb_device *usbdev;
42 struct usb_interface *pfunction;
43 struct urb *rx_usb_urb[MAX_RX_URBS];
44 u8 *tx_buffer;
45 __le16 bulkin_size;
46 u8 bulkin_endpoint_addr;
47 __le16 bulkout_size[MAX_BULK_EP];
48 u8 bulkout_endpoint_addr[MAX_BULK_EP];
49 u32 tx_blk_size;
50 u8 write_fail;
51};
52
53static inline int rsi_usb_check_queue_status(struct rsi_hw *adapter, u8 q_num)
54{
55 /* In USB, there isn't any need to check the queue status */
56 return QUEUE_NOT_FULL;
57}
58
59static inline int rsi_usb_event_timeout(struct rsi_hw *adapter)
60{
61 return EVENT_WAIT_FOREVER;
62}
63
64int rsi_usb_device_init(struct rsi_common *common);
65int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr,
66 u8 *data, u32 count);
67void rsi_usb_rx_thread(struct rsi_common *common);
68#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 42a2e06512f2..a49c3d73ea2c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -125,9 +125,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
125 125
126 tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100)); 126 tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
127 if (unlikely(tout)) 127 if (unlikely(tout))
128 rt2x00_warn(entry->queue->rt2x00dev, 128 rt2x00_dbg(entry->queue->rt2x00dev,
129 "TX status timeout for entry %d in queue %d\n", 129 "TX status timeout for entry %d in queue %d\n",
130 entry->entry_idx, entry->queue->qid); 130 entry->entry_idx, entry->queue->qid);
131 return tout; 131 return tout;
132 132
133} 133}
@@ -566,8 +566,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
566 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 566 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
567 567
568 if (unlikely(rt2x00queue_empty(queue))) { 568 if (unlikely(rt2x00queue_empty(queue))) {
569 rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", 569 rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
570 qid); 570 qid);
571 break; 571 break;
572 } 572 }
573 573
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 2e3d1645e68b..90fdb02b55e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -286,7 +286,7 @@ static ssize_t rt2x00debug_read_queue_dump(struct file *file,
286 if (retval) 286 if (retval)
287 return retval; 287 return retval;
288 288
289 status = min((size_t)skb->len, length); 289 status = min_t(size_t, skb->len, length);
290 if (copy_to_user(buf, skb->data, status)) { 290 if (copy_to_user(buf, skb->data, status)) {
291 status = -EFAULT; 291 status = -EFAULT;
292 goto exit; 292 goto exit;
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
index 30332175bcd8..1ce1d55f0010 100644
--- a/drivers/net/wireless/rtl818x/Kconfig
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -2,11 +2,11 @@
2# RTL818X Wireless LAN device configuration 2# RTL818X Wireless LAN device configuration
3# 3#
4config RTL8180 4config RTL8180
5 tristate "Realtek 8180/8185 PCI support" 5 tristate "Realtek 8180/8185/8187SE PCI support"
6 depends on MAC80211 && PCI 6 depends on MAC80211 && PCI
7 select EEPROM_93CX6 7 select EEPROM_93CX6
8 ---help--- 8 ---help---
9 This is a driver for RTL8180 and RTL8185 based cards. 9 This is a driver for RTL8180, RTL8185 and RTL8187SE based cards.
10 These are PCI based chips found in cards such as: 10 These are PCI based chips found in cards such as:
11 11
12 (RTL8185 802.11g) 12 (RTL8185 802.11g)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index cb4fb8596f0b..08b056db4a3b 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,4 +1,4 @@
1rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o 1rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
2 2
3obj-$(CONFIG_RTL8180) += rtl8180.o 3obj-$(CONFIG_RTL8180) += rtl8180.o
4 4
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 3867d1470b36..98d8256f0377 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1,15 +1,42 @@
1 1
2/* 2/* Linux device driver for RTL8180 / RTL8185 / RTL8187SE
3 * Linux device driver for RTL8180 / RTL8185
4 * 3 *
5 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
6 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com> 5 * Copyright 2007,2014 Andrea Merello <andrea.merello@gmail.com>
7 * 6 *
8 * Based on the r8180 driver, which is: 7 * Based on the r8180 driver, which is:
9 * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al. 8 * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
10 * 9 *
11 * Thanks to Realtek for their support! 10 * Thanks to Realtek for their support!
12 * 11 *
12 ************************************************************************
13 *
14 * The driver was extended to the RTL8187SE in 2014 by
15 * Andrea Merello <andrea.merello@gmail.com>
16 *
17 * based also on:
18 * - portions of rtl8187se Linux staging driver, Copyright Realtek corp.
19 * - other GPL, unpublished (until now), Linux driver code,
20 * Copyright Larry Finger <Larry.Finger@lwfinger.net>
21 *
22 * A huge thanks goes to Sara V. Nari who forgives me when I'm
23 * sitting in front of my laptop at evening, week-end, night...
24 *
25 * A special thanks goes to Antonio Cuni, who helped me with
26 * some python userspace stuff I used to debug RTL8187SE code, and who
27 * bought a laptop with an unsupported Wi-Fi card some years ago...
28 *
29 * Thanks to Larry Finger for writing some code for rtl8187se and for
30 * his suggestions.
31 *
32 * Thanks to Dan Carpenter for reviewing my initial patch and for his
33 * suggestions.
34 *
35 * Thanks to Bernhard Schiffner for his help in testing and for his
36 * suggestions.
37 *
38 ************************************************************************
39 *
13 * This program is free software; you can redistribute it and/or modify 40 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 41 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation. 42 * published by the Free Software Foundation.
@@ -29,13 +56,18 @@
29#include "sa2400.h" 56#include "sa2400.h"
30#include "max2820.h" 57#include "max2820.h"
31#include "grf5101.h" 58#include "grf5101.h"
59#include "rtl8225se.h"
32 60
33MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 61MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
34MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>"); 62MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
35MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); 63MODULE_DESCRIPTION("RTL8180 / RTL8185 / RTL8187SE PCI wireless driver");
36MODULE_LICENSE("GPL"); 64MODULE_LICENSE("GPL");
37 65
38static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = { 66static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
67
68 /* rtl8187se */
69 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8199) },
70
39 /* rtl8185 */ 71 /* rtl8185 */
40 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) }, 72 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
41 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) }, 73 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -85,6 +117,76 @@ static const struct ieee80211_channel rtl818x_channels[] = {
85 { .center_freq = 2484 }, 117 { .center_freq = 2484 },
86}; 118};
87 119
120/* Queues for rtl8187se card
121 *
122 * name | reg | queue
123 * BC | 7 | 6
124 * MG | 1 | 0
125 * HI | 6 | 1
126 * VO | 5 | 2
127 * VI | 4 | 3
128 * BE | 3 | 4
129 * BK | 2 | 5
130 *
131 * The complete map for DMA kick reg using use all queue is:
132 * static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] =
133 * {1, 6, 5, 4, 3, 2, 7};
134 *
135 * .. but.. Because for mac80211 4 queues are enough for QoS we use this
136 *
137 * name | reg | queue
138 * BC | 7 | 4 <- currently not used yet
139 * MG | 1 | x <- Not used
140 * HI | 6 | x <- Not used
141 * VO | 5 | 0 <- used
142 * VI | 4 | 1 <- used
143 * BE | 3 | 2 <- used
144 * BK | 2 | 3 <- used
145 *
146 * Beacon queue could be used, but this is not finished yet.
147 *
148 * I thougth about using the other two queues but I decided not to do this:
149 *
150 * - I'm unsure whether the mac80211 will ever try to use more than 4 queues
151 * by itself.
152 *
153 * - I could route MGMT frames (currently sent over VO queue) to the MGMT
154 * queue but since mac80211 will do not know about it, I will probably gain
155 * some HW priority whenever the VO queue is not empty, but this gain is
156 * limited by the fact that I had to stop the mac80211 queue whenever one of
157 * the VO or MGMT queues is full, stopping also submitting of MGMT frame
158 * to the driver.
159 *
160 * - I don't know how to set in the HW the contention window params for MGMT
161 * and HI-prio queues.
162 */
163
164static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] = {5, 4, 3, 2, 7};
165
166/* Queues for rtl8180/rtl8185 cards
167 *
168 * name | reg | prio
169 * BC | 7 | 3
170 * HI | 6 | 0
171 * NO | 5 | 1
172 * LO | 4 | 2
173 *
174 * The complete map for DMA kick reg using all queue is:
175 * static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {6, 5, 4, 7};
176 *
177 * .. but .. Because the mac80211 needs at least 4 queues for QoS or
178 * otherwise QoS can't be done, we use just one.
179 * Beacon queue could be used, but this is not finished yet.
180 * Actual map is:
181 *
182 * name | reg | prio
183 * BC | 7 | 1 <- currently not used yet.
184 * HI | 6 | x <- not used
185 * NO | 5 | x <- not used
186 * LO | 4 | 0 <- used
187 */
188
189static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {4, 7};
88 190
89void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) 191void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
90{ 192{
@@ -105,14 +207,30 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
105static void rtl8180_handle_rx(struct ieee80211_hw *dev) 207static void rtl8180_handle_rx(struct ieee80211_hw *dev)
106{ 208{
107 struct rtl8180_priv *priv = dev->priv; 209 struct rtl8180_priv *priv = dev->priv;
210 struct rtl818x_rx_cmd_desc *cmd_desc;
108 unsigned int count = 32; 211 unsigned int count = 32;
109 u8 signal, agc, sq; 212 u8 signal, agc, sq;
110 dma_addr_t mapping; 213 dma_addr_t mapping;
111 214
112 while (count--) { 215 while (count--) {
113 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; 216 void *entry = priv->rx_ring + priv->rx_idx * priv->rx_ring_sz;
114 struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; 217 struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
115 u32 flags = le32_to_cpu(entry->flags); 218 u32 flags, flags2;
219 u64 tsft;
220
221 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
222 struct rtl8187se_rx_desc *desc = entry;
223
224 flags = le32_to_cpu(desc->flags);
225 flags2 = le32_to_cpu(desc->flags2);
226 tsft = le64_to_cpu(desc->tsft);
227 } else {
228 struct rtl8180_rx_desc *desc = entry;
229
230 flags = le32_to_cpu(desc->flags);
231 flags2 = le32_to_cpu(desc->flags2);
232 tsft = le64_to_cpu(desc->tsft);
233 }
116 234
117 if (flags & RTL818X_RX_DESC_FLAG_OWN) 235 if (flags & RTL818X_RX_DESC_FLAG_OWN)
118 return; 236 return;
@@ -122,7 +240,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
122 RTL818X_RX_DESC_FLAG_RX_ERR))) 240 RTL818X_RX_DESC_FLAG_RX_ERR)))
123 goto done; 241 goto done;
124 else { 242 else {
125 u32 flags2 = le32_to_cpu(entry->flags2);
126 struct ieee80211_rx_status rx_status = {0}; 243 struct ieee80211_rx_status rx_status = {0};
127 struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE); 244 struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE);
128 245
@@ -148,19 +265,24 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
148 rx_status.antenna = (flags2 >> 15) & 1; 265 rx_status.antenna = (flags2 >> 15) & 1;
149 rx_status.rate_idx = (flags >> 20) & 0xF; 266 rx_status.rate_idx = (flags >> 20) & 0xF;
150 agc = (flags2 >> 17) & 0x7F; 267 agc = (flags2 >> 17) & 0x7F;
151 if (priv->r8185) { 268
269 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
152 if (rx_status.rate_idx > 3) 270 if (rx_status.rate_idx > 3)
153 signal = 90 - clamp_t(u8, agc, 25, 90); 271 signal = 90 - clamp_t(u8, agc, 25, 90);
154 else 272 else
155 signal = 95 - clamp_t(u8, agc, 30, 95); 273 signal = 95 - clamp_t(u8, agc, 30, 95);
156 } else { 274 } else if (priv->chip_family ==
275 RTL818X_CHIP_FAMILY_RTL8180) {
157 sq = flags2 & 0xff; 276 sq = flags2 & 0xff;
158 signal = priv->rf->calc_rssi(agc, sq); 277 signal = priv->rf->calc_rssi(agc, sq);
278 } else {
279 /* TODO: rtl8187se rssi */
280 signal = 10;
159 } 281 }
160 rx_status.signal = signal; 282 rx_status.signal = signal;
161 rx_status.freq = dev->conf.chandef.chan->center_freq; 283 rx_status.freq = dev->conf.chandef.chan->center_freq;
162 rx_status.band = dev->conf.chandef.chan->band; 284 rx_status.band = dev->conf.chandef.chan->band;
163 rx_status.mactime = le64_to_cpu(entry->tsft); 285 rx_status.mactime = tsft;
164 rx_status.flag |= RX_FLAG_MACTIME_START; 286 rx_status.flag |= RX_FLAG_MACTIME_START;
165 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 287 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
166 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 288 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -174,11 +296,13 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
174 } 296 }
175 297
176 done: 298 done:
177 entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb)); 299 cmd_desc = entry;
178 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN | 300 cmd_desc->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
301 cmd_desc->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
179 MAX_RX_SIZE); 302 MAX_RX_SIZE);
180 if (priv->rx_idx == 31) 303 if (priv->rx_idx == 31)
181 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR); 304 cmd_desc->flags |=
305 cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
182 priv->rx_idx = (priv->rx_idx + 1) % 32; 306 priv->rx_idx = (priv->rx_idx + 1) % 32;
183 } 307 }
184} 308}
@@ -218,6 +342,55 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
218 } 342 }
219} 343}
220 344
345static irqreturn_t rtl8187se_interrupt(int irq, void *dev_id)
346{
347 struct ieee80211_hw *dev = dev_id;
348 struct rtl8180_priv *priv = dev->priv;
349 u32 reg;
350 unsigned long flags;
351 static int desc_err;
352
353 spin_lock_irqsave(&priv->lock, flags);
354 /* Note: 32-bit interrupt status */
355 reg = rtl818x_ioread32(priv, &priv->map->INT_STATUS_SE);
356 if (unlikely(reg == 0xFFFFFFFF)) {
357 spin_unlock_irqrestore(&priv->lock, flags);
358 return IRQ_HANDLED;
359 }
360
361 rtl818x_iowrite32(priv, &priv->map->INT_STATUS_SE, reg);
362
363 if (reg & IMR_TIMEOUT1)
364 rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
365
366 if (reg & (IMR_TBDOK | IMR_TBDER))
367 rtl8180_handle_tx(dev, 4);
368
369 if (reg & (IMR_TVODOK | IMR_TVODER))
370 rtl8180_handle_tx(dev, 0);
371
372 if (reg & (IMR_TVIDOK | IMR_TVIDER))
373 rtl8180_handle_tx(dev, 1);
374
375 if (reg & (IMR_TBEDOK | IMR_TBEDER))
376 rtl8180_handle_tx(dev, 2);
377
378 if (reg & (IMR_TBKDOK | IMR_TBKDER))
379 rtl8180_handle_tx(dev, 3);
380
381 if (reg & (IMR_ROK | IMR_RER | RTL818X_INT_SE_RX_DU | IMR_RQOSOK))
382 rtl8180_handle_rx(dev);
383 /* The interface sometimes generates several RX DMA descriptor errors
384 * at startup. Do not report these.
385 */
386 if ((reg & RTL818X_INT_SE_RX_DU) && desc_err++ > 2)
387 if (net_ratelimit())
388 wiphy_err(dev->wiphy, "No RX DMA Descriptor avail\n");
389
390 spin_unlock_irqrestore(&priv->lock, flags);
391 return IRQ_HANDLED;
392}
393
221static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) 394static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
222{ 395{
223 struct ieee80211_hw *dev = dev_id; 396 struct ieee80211_hw *dev = dev_id;
@@ -234,12 +407,6 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
234 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg); 407 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
235 408
236 if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR)) 409 if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR))
237 rtl8180_handle_tx(dev, 3);
238
239 if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
240 rtl8180_handle_tx(dev, 2);
241
242 if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
243 rtl8180_handle_tx(dev, 1); 410 rtl8180_handle_tx(dev, 1);
244 411
245 if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR)) 412 if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
@@ -263,12 +430,14 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
263 struct rtl8180_tx_ring *ring; 430 struct rtl8180_tx_ring *ring;
264 struct rtl8180_tx_desc *entry; 431 struct rtl8180_tx_desc *entry;
265 unsigned long flags; 432 unsigned long flags;
266 unsigned int idx, prio; 433 unsigned int idx, prio, hw_prio;
267 dma_addr_t mapping; 434 dma_addr_t mapping;
268 u32 tx_flags; 435 u32 tx_flags;
269 u8 rc_flags; 436 u8 rc_flags;
270 u16 plcp_len = 0; 437 u16 plcp_len = 0;
271 __le16 rts_duration = 0; 438 __le16 rts_duration = 0;
439 /* do arithmetic and then convert to le16 */
440 u16 frame_duration = 0;
272 441
273 prio = skb_get_queue_mapping(skb); 442 prio = skb_get_queue_mapping(skb);
274 ring = &priv->tx_ring[prio]; 443 ring = &priv->tx_ring[prio];
@@ -280,7 +449,6 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
280 kfree_skb(skb); 449 kfree_skb(skb);
281 dev_err(&priv->pdev->dev, "TX DMA mapping error\n"); 450 dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
282 return; 451 return;
283
284 } 452 }
285 453
286 tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS | 454 tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
@@ -288,7 +456,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
288 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) | 456 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
289 skb->len; 457 skb->len;
290 458
291 if (priv->r8185) 459 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
292 tx_flags |= RTL818X_TX_DESC_FLAG_DMA | 460 tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
293 RTL818X_TX_DESC_FLAG_NO_ENC; 461 RTL818X_TX_DESC_FLAG_NO_ENC;
294 462
@@ -305,7 +473,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
305 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len, 473 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
306 info); 474 info);
307 475
308 if (!priv->r8185) { 476 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
309 unsigned int remainder; 477 unsigned int remainder;
310 478
311 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4), 479 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
@@ -316,6 +484,18 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
316 plcp_len |= 1 << 15; 484 plcp_len |= 1 << 15;
317 } 485 }
318 486
487 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
488 __le16 duration;
489 /* SIFS time (required by HW) is already included by
490 * ieee80211_generic_frame_duration
491 */
492 duration = ieee80211_generic_frame_duration(dev, priv->vif,
493 IEEE80211_BAND_2GHZ, skb->len,
494 ieee80211_get_tx_rate(dev, info));
495
496 frame_duration = priv->ack_time + le16_to_cpu(duration);
497 }
498
319 spin_lock_irqsave(&priv->lock, flags); 499 spin_lock_irqsave(&priv->lock, flags);
320 500
321 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 501 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -328,21 +508,91 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
328 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; 508 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
329 entry = &ring->desc[idx]; 509 entry = &ring->desc[idx];
330 510
511 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
512 entry->frame_duration = cpu_to_le16(frame_duration);
513 entry->frame_len_se = cpu_to_le16(skb->len);
514
515 /* tpc polarity */
516 entry->flags3 = cpu_to_le16(1<<4);
517 } else
518 entry->frame_len = cpu_to_le32(skb->len);
519
331 entry->rts_duration = rts_duration; 520 entry->rts_duration = rts_duration;
332 entry->plcp_len = cpu_to_le16(plcp_len); 521 entry->plcp_len = cpu_to_le16(plcp_len);
333 entry->tx_buf = cpu_to_le32(mapping); 522 entry->tx_buf = cpu_to_le32(mapping);
334 entry->frame_len = cpu_to_le32(skb->len); 523
335 entry->flags2 = info->control.rates[1].idx >= 0 ? 524 entry->flags2 = info->control.rates[1].idx >= 0 ?
336 ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0; 525 ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
337 entry->retry_limit = info->control.rates[0].count; 526 entry->retry_limit = info->control.rates[0].count;
527
528 /* We must be sure that tx_flags is written last because the HW
529 * looks at it to check if the rest of data is valid or not
530 */
531 wmb();
338 entry->flags = cpu_to_le32(tx_flags); 532 entry->flags = cpu_to_le32(tx_flags);
533 /* We must be sure this has been written before followings HW
534 * register write, because this write will made the HW attempts
535 * to DMA the just-written data
536 */
537 wmb();
538
339 __skb_queue_tail(&ring->queue, skb); 539 __skb_queue_tail(&ring->queue, skb);
340 if (ring->entries - skb_queue_len(&ring->queue) < 2) 540 if (ring->entries - skb_queue_len(&ring->queue) < 2)
341 ieee80211_stop_queue(dev, prio); 541 ieee80211_stop_queue(dev, prio);
342 542
343 spin_unlock_irqrestore(&priv->lock, flags); 543 spin_unlock_irqrestore(&priv->lock, flags);
344 544
345 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 545 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
546 /* just poll: rings are stopped with TPPollStop reg */
547 hw_prio = rtl8187se_queues_map[prio];
548 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
549 (1 << hw_prio));
550 } else {
551 hw_prio = rtl8180_queues_map[prio];
552 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
553 (1 << hw_prio) | /* ring to poll */
554 (1<<1) | (1<<2));/* stopped rings */
555 }
556}
557
558static void rtl8180_set_anaparam3(struct rtl8180_priv *priv, u16 anaparam3)
559{
560 u8 reg;
561
562 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
563 RTL818X_EEPROM_CMD_CONFIG);
564
565 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
566 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
567 reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
568
569 rtl818x_iowrite16(priv, &priv->map->ANAPARAM3, anaparam3);
570
571 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
572 reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
573
574 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
575 RTL818X_EEPROM_CMD_NORMAL);
576}
577
578void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2)
579{
580 u8 reg;
581
582 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
583 RTL818X_EEPROM_CMD_CONFIG);
584
585 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
586 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
587 reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
588
589 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
590
591 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
592 reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
593
594 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
595 RTL818X_EEPROM_CMD_NORMAL);
346} 596}
347 597
348void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam) 598void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -359,17 +609,171 @@ void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
359 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 609 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
360} 610}
361 611
612static void rtl8187se_mac_config(struct ieee80211_hw *dev)
613{
614 struct rtl8180_priv *priv = dev->priv;
615 u8 reg;
616
617 rtl818x_iowrite32(priv, REG_ADDR4(0x1F0), 0);
618 rtl818x_ioread32(priv, REG_ADDR4(0x1F0));
619 rtl818x_iowrite32(priv, REG_ADDR4(0x1F4), 0);
620 rtl818x_ioread32(priv, REG_ADDR4(0x1F4));
621 rtl818x_iowrite8(priv, REG_ADDR1(0x1F8), 0);
622 rtl818x_ioread8(priv, REG_ADDR1(0x1F8));
623 /* Enable DA10 TX power saving */
624 reg = rtl818x_ioread8(priv, &priv->map->PHY_PR);
625 rtl818x_iowrite8(priv, &priv->map->PHY_PR, reg | 0x04);
626 /* Power */
627 rtl818x_iowrite16(priv, PI_DATA_REG, 0x1000);
628 rtl818x_iowrite16(priv, SI_DATA_REG, 0x1000);
629 /* AFE - default to power ON */
630 rtl818x_iowrite16(priv, REG_ADDR2(0x370), 0x0560);
631 rtl818x_iowrite16(priv, REG_ADDR2(0x372), 0x0560);
632 rtl818x_iowrite16(priv, REG_ADDR2(0x374), 0x0DA4);
633 rtl818x_iowrite16(priv, REG_ADDR2(0x376), 0x0DA4);
634 rtl818x_iowrite16(priv, REG_ADDR2(0x378), 0x0560);
635 rtl818x_iowrite16(priv, REG_ADDR2(0x37A), 0x0560);
636 rtl818x_iowrite16(priv, REG_ADDR2(0x37C), 0x00EC);
637 rtl818x_iowrite16(priv, REG_ADDR2(0x37E), 0x00EC);
638 rtl818x_iowrite8(priv, REG_ADDR1(0x24E), 0x01);
639 /* unknown, needed for suspend to RAM resume */
640 rtl818x_iowrite8(priv, REG_ADDR1(0x0A), 0x72);
641}
642
643static void rtl8187se_set_antenna_config(struct ieee80211_hw *dev, u8 def_ant,
644 bool diversity)
645{
646 struct rtl8180_priv *priv = dev->priv;
647
648 rtl8225_write_phy_cck(dev, 0x0C, 0x09);
649 if (diversity) {
650 if (def_ant == 1) {
651 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
652 rtl8225_write_phy_cck(dev, 0x11, 0xBB);
653 rtl8225_write_phy_cck(dev, 0x01, 0xC7);
654 rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
655 rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
656 } else { /* main antenna */
657 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
658 rtl8225_write_phy_cck(dev, 0x11, 0x9B);
659 rtl8225_write_phy_cck(dev, 0x01, 0xC7);
660 rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
661 rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
662 }
663 } else { /* disable antenna diversity */
664 if (def_ant == 1) {
665 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
666 rtl8225_write_phy_cck(dev, 0x11, 0xBB);
667 rtl8225_write_phy_cck(dev, 0x01, 0x47);
668 rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
669 rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
670 } else { /* main antenna */
671 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
672 rtl8225_write_phy_cck(dev, 0x11, 0x9B);
673 rtl8225_write_phy_cck(dev, 0x01, 0x47);
674 rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
675 rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
676 }
677 }
678 /* priv->curr_ant = def_ant; */
679}
680
681static void rtl8180_int_enable(struct ieee80211_hw *dev)
682{
683 struct rtl8180_priv *priv = dev->priv;
684
685 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
686 rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
687 IMR_TBDER | IMR_THPDER |
688 IMR_THPDER | IMR_THPDOK |
689 IMR_TVODER | IMR_TVODOK |
690 IMR_TVIDER | IMR_TVIDOK |
691 IMR_TBEDER | IMR_TBEDOK |
692 IMR_TBKDER | IMR_TBKDOK |
693 IMR_RDU | IMR_RER |
694 IMR_ROK | IMR_RQOSOK);
695 } else {
696 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
697 }
698}
699
700static void rtl8180_int_disable(struct ieee80211_hw *dev)
701{
702 struct rtl8180_priv *priv = dev->priv;
703
704 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
705 rtl818x_iowrite32(priv, &priv->map->IMR, 0);
706 } else {
707 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
708 }
709}
710
711static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev,
712 u32 rates_mask)
713{
714 struct rtl8180_priv *priv = dev->priv;
715
716 u8 max, min;
717 u16 reg;
718
719 max = fls(rates_mask) - 1;
720 min = ffs(rates_mask) - 1;
721
722 switch (priv->chip_family) {
723
724 case RTL818X_CHIP_FAMILY_RTL8180:
725 /* in 8180 this is NOT a BITMAP */
726 reg = rtl818x_ioread16(priv, &priv->map->BRSR);
727 reg &= ~3;
728 reg |= max;
729 rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
730 break;
731
732 case RTL818X_CHIP_FAMILY_RTL8185:
733 /* in 8185 this is a BITMAP */
734 rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask);
735 rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min);
736 break;
737
738 case RTL818X_CHIP_FAMILY_RTL8187SE:
739 /* in 8187se this is a BITMAP */
740 rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask);
741 break;
742 }
743}
744
745static void rtl8180_config_cardbus(struct ieee80211_hw *dev)
746{
747 struct rtl8180_priv *priv = dev->priv;
748 u16 reg16;
749 u8 reg8;
750
751 reg8 = rtl818x_ioread8(priv, &priv->map->CONFIG3);
752 reg8 |= 1 << 1;
753 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg8);
754
755 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
756 rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
757 } else {
758 reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
759 reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
760 rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
761 }
762
763}
764
362static int rtl8180_init_hw(struct ieee80211_hw *dev) 765static int rtl8180_init_hw(struct ieee80211_hw *dev)
363{ 766{
364 struct rtl8180_priv *priv = dev->priv; 767 struct rtl8180_priv *priv = dev->priv;
365 u16 reg; 768 u16 reg;
769 u32 reg32;
366 770
367 rtl818x_iowrite8(priv, &priv->map->CMD, 0); 771 rtl818x_iowrite8(priv, &priv->map->CMD, 0);
368 rtl818x_ioread8(priv, &priv->map->CMD); 772 rtl818x_ioread8(priv, &priv->map->CMD);
369 msleep(10); 773 msleep(10);
370 774
371 /* reset */ 775 /* reset */
372 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 776 rtl8180_int_disable(dev);
373 rtl818x_ioread8(priv, &priv->map->CMD); 777 rtl818x_ioread8(priv, &priv->map->CMD);
374 778
375 reg = rtl818x_ioread8(priv, &priv->map->CMD); 779 reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -390,31 +794,45 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
390 msleep(200); 794 msleep(200);
391 795
392 if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) { 796 if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) {
393 /* For cardbus */ 797 rtl8180_config_cardbus(dev);
394 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
395 reg |= 1 << 1;
396 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
397 reg = rtl818x_ioread16(priv, &priv->map->FEMR);
398 reg |= (1 << 15) | (1 << 14) | (1 << 4);
399 rtl818x_iowrite16(priv, &priv->map->FEMR, reg);
400 } 798 }
401 799
402 rtl818x_iowrite8(priv, &priv->map->MSR, 0); 800 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
801 rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
802 else
803 rtl818x_iowrite8(priv, &priv->map->MSR, 0);
403 804
404 if (!priv->r8185) 805 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
405 rtl8180_set_anaparam(priv, priv->anaparam); 806 rtl8180_set_anaparam(priv, priv->anaparam);
406 807
407 rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma); 808 rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
408 rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma); 809 /* mac80211 queue have higher prio for lower index. The last queue
409 rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma); 810 * (that mac80211 is not aware of) is reserved for beacons (and have
410 rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma); 811 * the highest priority on the NIC)
411 rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma); 812 */
813 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
814 rtl818x_iowrite32(priv, &priv->map->TBDA,
815 priv->tx_ring[1].dma);
816 rtl818x_iowrite32(priv, &priv->map->TLPDA,
817 priv->tx_ring[0].dma);
818 } else {
819 rtl818x_iowrite32(priv, &priv->map->TBDA,
820 priv->tx_ring[4].dma);
821 rtl818x_iowrite32(priv, &priv->map->TVODA,
822 priv->tx_ring[0].dma);
823 rtl818x_iowrite32(priv, &priv->map->TVIDA,
824 priv->tx_ring[1].dma);
825 rtl818x_iowrite32(priv, &priv->map->TBEDA,
826 priv->tx_ring[2].dma);
827 rtl818x_iowrite32(priv, &priv->map->TBKDA,
828 priv->tx_ring[3].dma);
829 }
412 830
413 /* TODO: necessary? specs indicate not */ 831 /* TODO: necessary? specs indicate not */
414 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 832 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
415 reg = rtl818x_ioread8(priv, &priv->map->CONFIG2); 833 reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
416 rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3)); 834 rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3));
417 if (priv->r8185) { 835 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
418 reg = rtl818x_ioread8(priv, &priv->map->CONFIG2); 836 reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
419 rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4)); 837 rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4));
420 } 838 }
@@ -426,13 +844,17 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
426 844
427 rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0); 845 rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
428 846
429 if (priv->r8185) { 847 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
430 rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); 848 rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
431 rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81); 849 rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
432 rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0); 850 } else {
851 rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
433 852
434 rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); 853 rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
854 rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
855 }
435 856
857 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
436 /* TODO: set ClkRun enable? necessary? */ 858 /* TODO: set ClkRun enable? necessary? */
437 reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE); 859 reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE);
438 rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6)); 860 rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6));
@@ -440,28 +862,90 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
440 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); 862 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
441 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2)); 863 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2));
442 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 864 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
443 } else { 865 }
444 rtl818x_iowrite16(priv, &priv->map->BRSR, 0x1);
445 rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
446 866
447 rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6); 867 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
448 rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C); 868
869 /* the set auto rate fallback bitmask from 1M to 54 Mb/s */
870 rtl818x_iowrite16(priv, ARFR, 0xFFF);
871 rtl818x_ioread16(priv, ARFR);
872
873 /* stop unused queus (no dma alloc) */
874 rtl818x_iowrite8(priv, &priv->map->TPPOLL_STOP,
875 RTL818x_TPPOLL_STOP_MG | RTL818x_TPPOLL_STOP_HI);
876
877 rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0x00);
878 rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50);
879
880 rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0);
881
882 /* some black magic here.. */
883 rtl8187se_mac_config(dev);
884
885 rtl818x_iowrite16(priv, RFSW_CTRL, 0x569A);
886 rtl818x_ioread16(priv, RFSW_CTRL);
887
888 rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_ON);
889 rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_ON);
890 rtl8180_set_anaparam3(priv, RTL8225SE_ANAPARAM3);
891
892
893 rtl818x_iowrite8(priv, &priv->map->CONFIG5,
894 rtl818x_ioread8(priv, &priv->map->CONFIG5) & 0x7F);
895
896 /*probably this switch led on */
897 rtl818x_iowrite8(priv, &priv->map->PGSELECT,
898 rtl818x_ioread8(priv, &priv->map->PGSELECT) | 0x08);
899
900 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
901 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1BFF);
902 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
903
904 rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x4003);
905
906 /* the reference code mac hardcode table write
907 * this reg by doing byte-wide accesses.
908 * It does it just for lowest and highest byte..
909 */
910 reg32 = rtl818x_ioread32(priv, &priv->map->RF_PARA);
911 reg32 &= 0x00ffff00;
912 reg32 |= 0xb8000054;
913 rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
449 } 914 }
450 915
451 priv->rf->init(dev); 916 priv->rf->init(dev);
452 if (priv->r8185) 917
453 rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); 918 /* default basic rates are 1,2 Mbps for rtl8180. 1,2,6,9,12,18,24 Mbps
919 * otherwise. bitmask 0x3 and 0x01f3 respectively.
920 * NOTE: currenty rtl8225 RF code changes basic rates, so we need to do
921 * this after rf init.
922 * TODO: try to find out whether RF code really needs to do this..
923 */
924 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
925 rtl8180_conf_basic_rates(dev, 0x3);
926 else
927 rtl8180_conf_basic_rates(dev, 0x1f3);
928
929 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
930 rtl8187se_set_antenna_config(dev,
931 priv->antenna_diversity_default,
932 priv->antenna_diversity_en);
454 return 0; 933 return 0;
455} 934}
456 935
457static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) 936static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
458{ 937{
459 struct rtl8180_priv *priv = dev->priv; 938 struct rtl8180_priv *priv = dev->priv;
460 struct rtl8180_rx_desc *entry; 939 struct rtl818x_rx_cmd_desc *entry;
461 int i; 940 int i;
462 941
942 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
943 priv->rx_ring_sz = sizeof(struct rtl8187se_rx_desc);
944 else
945 priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
946
463 priv->rx_ring = pci_alloc_consistent(priv->pdev, 947 priv->rx_ring = pci_alloc_consistent(priv->pdev,
464 sizeof(*priv->rx_ring) * 32, 948 priv->rx_ring_sz * 32,
465 &priv->rx_ring_dma); 949 &priv->rx_ring_dma);
466 950
467 if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { 951 if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
@@ -469,20 +953,28 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
469 return -ENOMEM; 953 return -ENOMEM;
470 } 954 }
471 955
472 memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32); 956 memset(priv->rx_ring, 0, priv->rx_ring_sz * 32);
473 priv->rx_idx = 0; 957 priv->rx_idx = 0;
474 958
475 for (i = 0; i < 32; i++) { 959 for (i = 0; i < 32; i++) {
476 struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE); 960 struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
477 dma_addr_t *mapping; 961 dma_addr_t *mapping;
478 entry = &priv->rx_ring[i]; 962 entry = priv->rx_ring + priv->rx_ring_sz*i;
479 if (!skb) 963 if (!skb) {
480 return 0; 964 wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
481 965 return -ENOMEM;
966 }
482 priv->rx_buf[i] = skb; 967 priv->rx_buf[i] = skb;
483 mapping = (dma_addr_t *)skb->cb; 968 mapping = (dma_addr_t *)skb->cb;
484 *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb), 969 *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
485 MAX_RX_SIZE, PCI_DMA_FROMDEVICE); 970 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
971
972 if (pci_dma_mapping_error(priv->pdev, *mapping)) {
973 kfree_skb(skb);
974 wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
975 return -ENOMEM;
976 }
977
486 entry->rx_buf = cpu_to_le32(*mapping); 978 entry->rx_buf = cpu_to_le32(*mapping);
487 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN | 979 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
488 MAX_RX_SIZE); 980 MAX_RX_SIZE);
@@ -507,7 +999,7 @@ static void rtl8180_free_rx_ring(struct ieee80211_hw *dev)
507 kfree_skb(skb); 999 kfree_skb(skb);
508 } 1000 }
509 1001
510 pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * 32, 1002 pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
511 priv->rx_ring, priv->rx_ring_dma); 1003 priv->rx_ring, priv->rx_ring_dma);
512 priv->rx_ring = NULL; 1004 priv->rx_ring = NULL;
513} 1005}
@@ -571,7 +1063,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
571 if (ret) 1063 if (ret)
572 return ret; 1064 return ret;
573 1065
574 for (i = 0; i < 4; i++) 1066 for (i = 0; i < (dev->queues + 1); i++)
575 if ((ret = rtl8180_init_tx_ring(dev, i, 16))) 1067 if ((ret = rtl8180_init_tx_ring(dev, i, 16)))
576 goto err_free_rings; 1068 goto err_free_rings;
577 1069
@@ -579,23 +1071,28 @@ static int rtl8180_start(struct ieee80211_hw *dev)
579 if (ret) 1071 if (ret)
580 goto err_free_rings; 1072 goto err_free_rings;
581 1073
582 rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma); 1074 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
583 rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma); 1075 ret = request_irq(priv->pdev->irq, rtl8187se_interrupt,
584 rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
585 rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
586 rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
587
588 ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
589 IRQF_SHARED, KBUILD_MODNAME, dev); 1076 IRQF_SHARED, KBUILD_MODNAME, dev);
1077 } else {
1078 ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
1079 IRQF_SHARED, KBUILD_MODNAME, dev);
1080 }
1081
590 if (ret) { 1082 if (ret) {
591 wiphy_err(dev->wiphy, "failed to register IRQ handler\n"); 1083 wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
592 goto err_free_rings; 1084 goto err_free_rings;
593 } 1085 }
594 1086
595 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); 1087 rtl8180_int_enable(dev);
596 1088
597 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); 1089 /* in rtl8187se at MAR regs offset there is the management
598 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); 1090 * TX descriptor DMA addres..
1091 */
1092 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
1093 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
1094 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
1095 }
599 1096
600 reg = RTL818X_RX_CONF_ONLYERLPKT | 1097 reg = RTL818X_RX_CONF_ONLYERLPKT |
601 RTL818X_RX_CONF_RX_AUTORESETPHY | 1098 RTL818X_RX_CONF_RX_AUTORESETPHY |
@@ -605,27 +1102,42 @@ static int rtl8180_start(struct ieee80211_hw *dev)
605 RTL818X_RX_CONF_BROADCAST | 1102 RTL818X_RX_CONF_BROADCAST |
606 RTL818X_RX_CONF_NICMAC; 1103 RTL818X_RX_CONF_NICMAC;
607 1104
608 if (priv->r8185) 1105 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185)
609 reg |= RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2; 1106 reg |= RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2;
610 else { 1107 else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
611 reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE1) 1108 reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE1)
612 ? RTL818X_RX_CONF_CSDM1 : 0; 1109 ? RTL818X_RX_CONF_CSDM1 : 0;
613 reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE2) 1110 reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE2)
614 ? RTL818X_RX_CONF_CSDM2 : 0; 1111 ? RTL818X_RX_CONF_CSDM2 : 0;
1112 } else {
1113 reg &= ~(RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2);
615 } 1114 }
616 1115
617 priv->rx_conf = reg; 1116 priv->rx_conf = reg;
618 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); 1117 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
619 1118
620 if (priv->r8185) { 1119 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
621 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); 1120 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
622 reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT; 1121
623 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; 1122 /* CW is not on per-packet basis.
1123 * in rtl8185 the CW_VALUE reg is used.
1124 * in rtl8187se the AC param regs are used.
1125 */
1126 reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
1127 /* retry limit IS on per-packet basis.
1128 * the short and long retry limit in TX_CONF
1129 * reg are ignored
1130 */
1131 reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
624 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); 1132 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
625 1133
626 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); 1134 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
627 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT; 1135 /* TX antenna and TX gain are not on per-packet basis.
628 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT; 1136 * TX Antenna is selected by ANTSEL reg (RX in BB regs).
1137 * TX gain is selected with CCK_TX_AGC and OFDM_TX_AGC regs
1138 */
1139 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
1140 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
629 reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT; 1141 reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
630 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); 1142 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
631 1143
@@ -637,11 +1149,16 @@ static int rtl8180_start(struct ieee80211_hw *dev)
637 reg |= (6 << 21 /* MAX TX DMA */) | 1149 reg |= (6 << 21 /* MAX TX DMA */) |
638 RTL818X_TX_CONF_NO_ICV; 1150 RTL818X_TX_CONF_NO_ICV;
639 1151
640 if (priv->r8185) 1152 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
1153 reg |= 1<<30; /* "duration procedure mode" */
1154
1155 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
641 reg &= ~RTL818X_TX_CONF_PROBE_DTS; 1156 reg &= ~RTL818X_TX_CONF_PROBE_DTS;
642 else 1157 else
643 reg &= ~RTL818X_TX_CONF_HW_SEQNUM; 1158 reg &= ~RTL818X_TX_CONF_HW_SEQNUM;
644 1159
1160 reg &= ~RTL818X_TX_CONF_DISCW;
1161
645 /* different meaning, same value on both rtl8185 and rtl8180 */ 1162 /* different meaning, same value on both rtl8185 and rtl8180 */
646 reg &= ~RTL818X_TX_CONF_SAT_HWPLCP; 1163 reg &= ~RTL818X_TX_CONF_SAT_HWPLCP;
647 1164
@@ -656,7 +1173,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
656 1173
657 err_free_rings: 1174 err_free_rings:
658 rtl8180_free_rx_ring(dev); 1175 rtl8180_free_rx_ring(dev);
659 for (i = 0; i < 4; i++) 1176 for (i = 0; i < (dev->queues + 1); i++)
660 if (priv->tx_ring[i].desc) 1177 if (priv->tx_ring[i].desc)
661 rtl8180_free_tx_ring(dev, i); 1178 rtl8180_free_tx_ring(dev, i);
662 1179
@@ -669,7 +1186,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
669 u8 reg; 1186 u8 reg;
670 int i; 1187 int i;
671 1188
672 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 1189 rtl8180_int_disable(dev);
673 1190
674 reg = rtl818x_ioread8(priv, &priv->map->CMD); 1191 reg = rtl818x_ioread8(priv, &priv->map->CMD);
675 reg &= ~RTL818X_CMD_TX_ENABLE; 1192 reg &= ~RTL818X_CMD_TX_ENABLE;
@@ -686,7 +1203,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
686 free_irq(priv->pdev->irq, dev); 1203 free_irq(priv->pdev->irq, dev);
687 1204
688 rtl8180_free_rx_ring(dev); 1205 rtl8180_free_rx_ring(dev);
689 for (i = 0; i < 4; i++) 1206 for (i = 0; i < (dev->queues + 1); i++)
690 rtl8180_free_tx_ring(dev, i); 1207 rtl8180_free_tx_ring(dev, i);
691} 1208}
692 1209
@@ -794,6 +1311,123 @@ static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
794 return 0; 1311 return 0;
795} 1312}
796 1313
1314static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
1315{
1316 const struct ieee80211_tx_queue_params *params;
1317 struct rtl8180_priv *priv = dev->priv;
1318
1319 /* hw value */
1320 u32 ac_param;
1321
1322 u8 aifs;
1323 u8 txop;
1324 u8 cw_min, cw_max;
1325
1326 params = &priv->queue_param[queue];
1327
1328 cw_min = fls(params->cw_min);
1329 cw_max = fls(params->cw_max);
1330
1331 aifs = 10 + params->aifs * priv->slot_time;
1332
1333 /* TODO: check if txop HW is in us (mult by 32) */
1334 txop = params->txop;
1335
1336 ac_param = txop << AC_PARAM_TXOP_LIMIT_SHIFT |
1337 cw_max << AC_PARAM_ECW_MAX_SHIFT |
1338 cw_min << AC_PARAM_ECW_MIN_SHIFT |
1339 aifs << AC_PARAM_AIFS_SHIFT;
1340
1341 switch (queue) {
1342 case IEEE80211_AC_BK:
1343 rtl818x_iowrite32(priv, &priv->map->AC_BK_PARAM, ac_param);
1344 break;
1345 case IEEE80211_AC_BE:
1346 rtl818x_iowrite32(priv, &priv->map->AC_BE_PARAM, ac_param);
1347 break;
1348 case IEEE80211_AC_VI:
1349 rtl818x_iowrite32(priv, &priv->map->AC_VI_PARAM, ac_param);
1350 break;
1351 case IEEE80211_AC_VO:
1352 rtl818x_iowrite32(priv, &priv->map->AC_VO_PARAM, ac_param);
1353 break;
1354 }
1355}
1356
1357static int rtl8180_conf_tx(struct ieee80211_hw *dev,
1358 struct ieee80211_vif *vif, u16 queue,
1359 const struct ieee80211_tx_queue_params *params)
1360{
1361 struct rtl8180_priv *priv = dev->priv;
1362 u8 cw_min, cw_max;
1363
1364 /* nothing to do ? */
1365 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
1366 return 0;
1367
1368 cw_min = fls(params->cw_min);
1369 cw_max = fls(params->cw_max);
1370
1371 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
1372 priv->queue_param[queue] = *params;
1373 rtl8187se_conf_ac_parm(dev, queue);
1374 } else
1375 rtl818x_iowrite8(priv, &priv->map->CW_VAL,
1376 (cw_max << 4) | cw_min);
1377 return 0;
1378}
1379
1380static void rtl8180_conf_erp(struct ieee80211_hw *dev,
1381 struct ieee80211_bss_conf *info)
1382{
1383 struct rtl8180_priv *priv = dev->priv;
1384 u8 sifs, difs;
1385 int eifs;
1386 u8 hw_eifs;
1387
1388 /* TODO: should we do something ? */
1389 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
1390 return;
1391
1392 /* I _hope_ this means 10uS for the HW.
1393 * In reference code it is 0x22 for
1394 * both rtl8187L and rtl8187SE
1395 */
1396 sifs = 0x22;
1397
1398 if (info->use_short_slot)
1399 priv->slot_time = 9;
1400 else
1401 priv->slot_time = 20;
1402
1403 /* 10 is SIFS time in uS */
1404 difs = 10 + 2 * priv->slot_time;
1405 eifs = 10 + difs + priv->ack_time;
1406
1407 /* HW should use 4uS units for EIFS (I'm sure for rtl8185)*/
1408 hw_eifs = DIV_ROUND_UP(eifs, 4);
1409
1410
1411 rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
1412 rtl818x_iowrite8(priv, &priv->map->SIFS, sifs);
1413 rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
1414
1415 /* from reference code. set ack timeout reg = eifs reg */
1416 rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, hw_eifs);
1417
1418 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
1419 rtl818x_iowrite8(priv, &priv->map->EIFS_8187SE, hw_eifs);
1420 else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
1421 /* rtl8187/rtl8185 HW bug. After EIFS is elapsed,
1422 * the HW still wait for DIFS.
1423 * HW uses 4uS units for EIFS.
1424 */
1425 hw_eifs = DIV_ROUND_UP(eifs - difs, 4);
1426
1427 rtl818x_iowrite8(priv, &priv->map->EIFS, hw_eifs);
1428 }
1429}
1430
797static void rtl8180_bss_info_changed(struct ieee80211_hw *dev, 1431static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
798 struct ieee80211_vif *vif, 1432 struct ieee80211_vif *vif,
799 struct ieee80211_bss_conf *info, 1433 struct ieee80211_bss_conf *info,
@@ -818,11 +1452,40 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
818 reg = RTL818X_MSR_INFRA; 1452 reg = RTL818X_MSR_INFRA;
819 } else 1453 } else
820 reg = RTL818X_MSR_NO_LINK; 1454 reg = RTL818X_MSR_NO_LINK;
1455
1456 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
1457 reg |= RTL818X_MSR_ENEDCA;
1458
821 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1459 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
822 } 1460 }
823 1461
824 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp) 1462 if (changed & BSS_CHANGED_BASIC_RATES)
825 priv->rf->conf_erp(dev, info); 1463 rtl8180_conf_basic_rates(dev, info->basic_rates);
1464
1465 if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE)) {
1466
1467 /* when preamble changes, acktime duration changes, and erp must
1468 * be recalculated. ACK time is calculated at lowest rate.
1469 * Since mac80211 include SIFS time we remove it (-10)
1470 */
1471 priv->ack_time =
1472 le16_to_cpu(ieee80211_generic_frame_duration(dev,
1473 priv->vif,
1474 IEEE80211_BAND_2GHZ, 10,
1475 &priv->rates[0])) - 10;
1476
1477 rtl8180_conf_erp(dev, info);
1478
1479 /* mac80211 supplies aifs_n to driver and calls
1480 * conf_tx callback whether aifs_n changes, NOT
1481 * when aifs changes.
1482 * Aifs should be recalculated if slot changes.
1483 */
1484 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
1485 for (i = 0; i < 4; i++)
1486 rtl8187se_conf_ac_parm(dev, i);
1487 }
1488 }
826 1489
827 if (changed & BSS_CHANGED_BEACON_ENABLED) 1490 if (changed & BSS_CHANGED_BEACON_ENABLED)
828 vif_priv->enable_beacon = info->enable_beacon; 1491 vif_priv->enable_beacon = info->enable_beacon;
@@ -880,6 +1543,7 @@ static const struct ieee80211_ops rtl8180_ops = {
880 .remove_interface = rtl8180_remove_interface, 1543 .remove_interface = rtl8180_remove_interface,
881 .config = rtl8180_config, 1544 .config = rtl8180_config,
882 .bss_info_changed = rtl8180_bss_info_changed, 1545 .bss_info_changed = rtl8180_bss_info_changed,
1546 .conf_tx = rtl8180_conf_tx,
883 .prepare_multicast = rtl8180_prepare_multicast, 1547 .prepare_multicast = rtl8180_prepare_multicast,
884 .configure_filter = rtl8180_configure_filter, 1548 .configure_filter = rtl8180_configure_filter,
885 .get_tsf = rtl8180_get_tsf, 1549 .get_tsf = rtl8180_get_tsf,
@@ -887,8 +1551,7 @@ static const struct ieee80211_ops rtl8180_ops = {
887 1551
888static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom) 1552static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
889{ 1553{
890 struct ieee80211_hw *dev = eeprom->data; 1554 struct rtl8180_priv *priv = eeprom->data;
891 struct rtl8180_priv *priv = dev->priv;
892 u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); 1555 u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
893 1556
894 eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE; 1557 eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
@@ -899,8 +1562,7 @@ static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
899 1562
900static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom) 1563static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
901{ 1564{
902 struct ieee80211_hw *dev = eeprom->data; 1565 struct rtl8180_priv *priv = eeprom->data;
903 struct rtl8180_priv *priv = dev->priv;
904 u8 reg = 2 << 6; 1566 u8 reg = 2 << 6;
905 1567
906 if (eeprom->reg_data_in) 1568 if (eeprom->reg_data_in)
@@ -917,6 +1579,83 @@ static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
917 udelay(10); 1579 udelay(10);
918} 1580}
919 1581
1582static void rtl8180_eeprom_read(struct rtl8180_priv *priv)
1583{
1584 struct eeprom_93cx6 eeprom;
1585 int eeprom_cck_table_adr;
1586 u16 eeprom_val;
1587 int i;
1588
1589 eeprom.data = priv;
1590 eeprom.register_read = rtl8180_eeprom_register_read;
1591 eeprom.register_write = rtl8180_eeprom_register_write;
1592 if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
1593 eeprom.width = PCI_EEPROM_WIDTH_93C66;
1594 else
1595 eeprom.width = PCI_EEPROM_WIDTH_93C46;
1596
1597 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
1598 RTL818X_EEPROM_CMD_PROGRAM);
1599 rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
1600 udelay(10);
1601
1602 eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
1603 eeprom_val &= 0xFF;
1604 priv->rf_type = eeprom_val;
1605
1606 eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
1607 priv->csthreshold = eeprom_val >> 8;
1608
1609 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)priv->mac_addr, 3);
1610
1611 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
1612 eeprom_cck_table_adr = 0x30;
1613 else
1614 eeprom_cck_table_adr = 0x10;
1615
1616 /* CCK TX power */
1617 for (i = 0; i < 14; i += 2) {
1618 u16 txpwr;
1619 eeprom_93cx6_read(&eeprom, eeprom_cck_table_adr + (i >> 1),
1620 &txpwr);
1621 priv->channels[i].hw_value = txpwr & 0xFF;
1622 priv->channels[i + 1].hw_value = txpwr >> 8;
1623 }
1624
1625 /* OFDM TX power */
1626 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
1627 for (i = 0; i < 14; i += 2) {
1628 u16 txpwr;
1629 eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
1630 priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
1631 priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
1632 }
1633 }
1634
1635 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
1636 __le32 anaparam;
1637 eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
1638 priv->anaparam = le32_to_cpu(anaparam);
1639 eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
1640 }
1641
1642 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
1643 eeprom_93cx6_read(&eeprom, 0x3F, &eeprom_val);
1644 priv->antenna_diversity_en = !!(eeprom_val & 0x100);
1645 priv->antenna_diversity_default = (eeprom_val & 0xC00) == 0x400;
1646
1647 eeprom_93cx6_read(&eeprom, 0x7C, &eeprom_val);
1648 priv->xtal_out = eeprom_val & 0xF;
1649 priv->xtal_in = (eeprom_val & 0xF0) >> 4;
1650 priv->xtal_cal = !!(eeprom_val & 0x1000);
1651 priv->thermal_meter_val = (eeprom_val & 0xF00) >> 8;
1652 priv->thermal_meter_en = !!(eeprom_val & 0x2000);
1653 }
1654
1655 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
1656 RTL818X_EEPROM_CMD_NORMAL);
1657}
1658
920static int rtl8180_probe(struct pci_dev *pdev, 1659static int rtl8180_probe(struct pci_dev *pdev,
921 const struct pci_device_id *id) 1660 const struct pci_device_id *id)
922{ 1661{
@@ -924,12 +1663,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
924 struct rtl8180_priv *priv; 1663 struct rtl8180_priv *priv;
925 unsigned long mem_addr, mem_len; 1664 unsigned long mem_addr, mem_len;
926 unsigned int io_addr, io_len; 1665 unsigned int io_addr, io_len;
927 int err, i; 1666 int err;
928 struct eeprom_93cx6 eeprom;
929 const char *chip_name, *rf_name = NULL; 1667 const char *chip_name, *rf_name = NULL;
930 u32 reg; 1668 u32 reg;
931 u16 eeprom_val;
932 u8 mac_addr[ETH_ALEN];
933 1669
934 err = pci_enable_device(pdev); 1670 err = pci_enable_device(pdev);
935 if (err) { 1671 if (err) {
@@ -1011,7 +1747,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
1011 dev->vif_data_size = sizeof(struct rtl8180_vif); 1747 dev->vif_data_size = sizeof(struct rtl8180_vif);
1012 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 1748 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1013 BIT(NL80211_IFTYPE_ADHOC); 1749 BIT(NL80211_IFTYPE_ADHOC);
1014 dev->queues = 1;
1015 dev->max_signal = 65; 1750 dev->max_signal = 65;
1016 1751
1017 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 1752 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
@@ -1019,43 +1754,55 @@ static int rtl8180_probe(struct pci_dev *pdev,
1019 switch (reg) { 1754 switch (reg) {
1020 case RTL818X_TX_CONF_R8180_ABCD: 1755 case RTL818X_TX_CONF_R8180_ABCD:
1021 chip_name = "RTL8180"; 1756 chip_name = "RTL8180";
1757 priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
1022 break; 1758 break;
1759
1023 case RTL818X_TX_CONF_R8180_F: 1760 case RTL818X_TX_CONF_R8180_F:
1024 chip_name = "RTL8180vF"; 1761 chip_name = "RTL8180vF";
1762 priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
1025 break; 1763 break;
1764
1026 case RTL818X_TX_CONF_R8185_ABC: 1765 case RTL818X_TX_CONF_R8185_ABC:
1027 chip_name = "RTL8185"; 1766 chip_name = "RTL8185";
1767 priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
1028 break; 1768 break;
1769
1029 case RTL818X_TX_CONF_R8185_D: 1770 case RTL818X_TX_CONF_R8185_D:
1030 chip_name = "RTL8185vD"; 1771 chip_name = "RTL8185vD";
1772 priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
1773 break;
1774
1775 case RTL818X_TX_CONF_RTL8187SE:
1776 chip_name = "RTL8187SE";
1777 priv->chip_family = RTL818X_CHIP_FAMILY_RTL8187SE;
1031 break; 1778 break;
1779
1032 default: 1780 default:
1033 printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n", 1781 printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n",
1034 pci_name(pdev), reg >> 25); 1782 pci_name(pdev), reg >> 25);
1035 goto err_iounmap; 1783 goto err_iounmap;
1036 } 1784 }
1037 1785
1038 priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC; 1786 /* we declare to MAC80211 all the queues except for beacon queue
1039 if (priv->r8185) { 1787 * that will be eventually handled by DRV.
1788 * TX rings are arranged in such a way that lower is the IDX,
1789 * higher is the priority, in order to achieve direct mapping
1790 * with mac80211, however the beacon queue is an exception and it
1791 * is mapped on the highst tx ring IDX.
1792 */
1793 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
1794 dev->queues = RTL8187SE_NR_TX_QUEUES - 1;
1795 else
1796 dev->queues = RTL8180_NR_TX_QUEUES - 1;
1797
1798 if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
1040 priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates); 1799 priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
1041 pci_try_set_mwi(pdev); 1800 pci_try_set_mwi(pdev);
1042 } 1801 }
1043 1802
1044 eeprom.data = dev; 1803 rtl8180_eeprom_read(priv);
1045 eeprom.register_read = rtl8180_eeprom_register_read;
1046 eeprom.register_write = rtl8180_eeprom_register_write;
1047 if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
1048 eeprom.width = PCI_EEPROM_WIDTH_93C66;
1049 else
1050 eeprom.width = PCI_EEPROM_WIDTH_93C46;
1051
1052 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_PROGRAM);
1053 rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
1054 udelay(10);
1055 1804
1056 eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val); 1805 switch (priv->rf_type) {
1057 eeprom_val &= 0xFF;
1058 switch (eeprom_val) {
1059 case 1: rf_name = "Intersil"; 1806 case 1: rf_name = "Intersil";
1060 break; 1807 break;
1061 case 2: rf_name = "RFMD"; 1808 case 2: rf_name = "RFMD";
@@ -1066,14 +1813,18 @@ static int rtl8180_probe(struct pci_dev *pdev,
1066 break; 1813 break;
1067 case 5: priv->rf = &grf5101_rf_ops; 1814 case 5: priv->rf = &grf5101_rf_ops;
1068 break; 1815 break;
1069 case 9: priv->rf = rtl8180_detect_rf(dev); 1816 case 9:
1817 if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
1818 priv->rf = rtl8187se_detect_rf(dev);
1819 else
1820 priv->rf = rtl8180_detect_rf(dev);
1070 break; 1821 break;
1071 case 10: 1822 case 10:
1072 rf_name = "RTL8255"; 1823 rf_name = "RTL8255";
1073 break; 1824 break;
1074 default: 1825 default:
1075 printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n", 1826 printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n",
1076 pci_name(pdev), eeprom_val); 1827 pci_name(pdev), priv->rf_type);
1077 goto err_iounmap; 1828 goto err_iounmap;
1078 } 1829 }
1079 1830
@@ -1083,42 +1834,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
1083 goto err_iounmap; 1834 goto err_iounmap;
1084 } 1835 }
1085 1836
1086 eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val); 1837 if (!is_valid_ether_addr(priv->mac_addr)) {
1087 priv->csthreshold = eeprom_val >> 8;
1088 if (!priv->r8185) {
1089 __le32 anaparam;
1090 eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
1091 priv->anaparam = le32_to_cpu(anaparam);
1092 eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
1093 }
1094
1095 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
1096 if (!is_valid_ether_addr(mac_addr)) {
1097 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using" 1838 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
1098 " randomly generated MAC addr\n", pci_name(pdev)); 1839 " randomly generated MAC addr\n", pci_name(pdev));
1099 eth_random_addr(mac_addr); 1840 eth_random_addr(priv->mac_addr);
1100 }
1101 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
1102
1103 /* CCK TX power */
1104 for (i = 0; i < 14; i += 2) {
1105 u16 txpwr;
1106 eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr);
1107 priv->channels[i].hw_value = txpwr & 0xFF;
1108 priv->channels[i + 1].hw_value = txpwr >> 8;
1109 }
1110
1111 /* OFDM TX power */
1112 if (priv->r8185) {
1113 for (i = 0; i < 14; i += 2) {
1114 u16 txpwr;
1115 eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
1116 priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
1117 priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
1118 }
1119 } 1841 }
1120 1842 SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);
1121 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
1122 1843
1123 spin_lock_init(&priv->lock); 1844 spin_lock_init(&priv->lock);
1124 1845
@@ -1130,12 +1851,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
1130 } 1851 }
1131 1852
1132 wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n", 1853 wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n",
1133 mac_addr, chip_name, priv->rf->name); 1854 priv->mac_addr, chip_name, priv->rf->name);
1134 1855
1135 return 0; 1856 return 0;
1136 1857
1137 err_iounmap: 1858 err_iounmap:
1138 iounmap(priv->map); 1859 pci_iounmap(pdev, priv->map);
1139 1860
1140 err_free_dev: 1861 err_free_dev:
1141 ieee80211_free_hw(dev); 1862 ieee80211_free_hw(dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
index 30523314da43..291a55970d1a 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
@@ -24,27 +24,64 @@
24#define ANAPARAM_PWR1_SHIFT 20 24#define ANAPARAM_PWR1_SHIFT 20
25#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT) 25#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT)
26 26
27/* rtl8180/rtl8185 have 3 queue + beacon queue.
28 * mac80211 can use just one, + beacon = 2 tot.
29 */
30#define RTL8180_NR_TX_QUEUES 2
31
32/* rtl8187SE have 6 queues + beacon queues
33 * mac80211 can use 4 QoS data queue, + beacon = 5 tot
34 */
35#define RTL8187SE_NR_TX_QUEUES 5
36
37/* for array static allocation, it is the max of above */
38#define RTL818X_NR_TX_QUEUES 5
39
27struct rtl8180_tx_desc { 40struct rtl8180_tx_desc {
28 __le32 flags; 41 __le32 flags;
29 __le16 rts_duration; 42 __le16 rts_duration;
30 __le16 plcp_len; 43 __le16 plcp_len;
31 __le32 tx_buf; 44 __le32 tx_buf;
32 __le32 frame_len; 45 union{
46 __le32 frame_len;
47 struct {
48 __le16 frame_len_se;
49 __le16 frame_duration;
50 } __packed;
51 } __packed;
33 __le32 next_tx_desc; 52 __le32 next_tx_desc;
34 u8 cw; 53 u8 cw;
35 u8 retry_limit; 54 u8 retry_limit;
36 u8 agc; 55 u8 agc;
37 u8 flags2; 56 u8 flags2;
38 u32 reserved[2]; 57 /* rsvd for 8180/8185.
58 * valid for 8187se but we dont use it
59 */
60 u32 reserved;
61 /* all rsvd for 8180/8185 */
62 __le16 flags3;
63 __le16 frag_qsize;
64} __packed;
65
66struct rtl818x_rx_cmd_desc {
67 __le32 flags;
68 u32 reserved;
69 __le32 rx_buf;
39} __packed; 70} __packed;
40 71
41struct rtl8180_rx_desc { 72struct rtl8180_rx_desc {
42 __le32 flags; 73 __le32 flags;
43 __le32 flags2; 74 __le32 flags2;
44 union { 75 __le64 tsft;
45 __le32 rx_buf; 76
46 __le64 tsft; 77} __packed;
47 }; 78
79struct rtl8187se_rx_desc {
80 __le32 flags;
81 __le64 tsft;
82 __le32 flags2;
83 __le32 flags3;
84 u32 reserved[3];
48} __packed; 85} __packed;
49 86
50struct rtl8180_tx_ring { 87struct rtl8180_tx_ring {
@@ -71,28 +108,45 @@ struct rtl8180_priv {
71 108
72 /* rtl8180 driver specific */ 109 /* rtl8180 driver specific */
73 spinlock_t lock; 110 spinlock_t lock;
74 struct rtl8180_rx_desc *rx_ring; 111 void *rx_ring;
112 u8 rx_ring_sz;
75 dma_addr_t rx_ring_dma; 113 dma_addr_t rx_ring_dma;
76 unsigned int rx_idx; 114 unsigned int rx_idx;
77 struct sk_buff *rx_buf[32]; 115 struct sk_buff *rx_buf[32];
78 struct rtl8180_tx_ring tx_ring[4]; 116 struct rtl8180_tx_ring tx_ring[RTL818X_NR_TX_QUEUES];
79 struct ieee80211_channel channels[14]; 117 struct ieee80211_channel channels[14];
80 struct ieee80211_rate rates[12]; 118 struct ieee80211_rate rates[12];
81 struct ieee80211_supported_band band; 119 struct ieee80211_supported_band band;
120 struct ieee80211_tx_queue_params queue_param[4];
82 struct pci_dev *pdev; 121 struct pci_dev *pdev;
83 u32 rx_conf; 122 u32 rx_conf;
84 123 u8 slot_time;
85 int r8185; 124 u16 ack_time;
125
126 enum {
127 RTL818X_CHIP_FAMILY_RTL8180,
128 RTL818X_CHIP_FAMILY_RTL8185,
129 RTL818X_CHIP_FAMILY_RTL8187SE,
130 } chip_family;
86 u32 anaparam; 131 u32 anaparam;
87 u16 rfparam; 132 u16 rfparam;
88 u8 csthreshold; 133 u8 csthreshold;
89 134 u8 mac_addr[ETH_ALEN];
135 u8 rf_type;
136 u8 xtal_out;
137 u8 xtal_in;
138 u8 xtal_cal;
139 u8 thermal_meter_val;
140 u8 thermal_meter_en;
141 u8 antenna_diversity_en;
142 u8 antenna_diversity_default;
90 /* sequence # */ 143 /* sequence # */
91 u16 seqno; 144 u16 seqno;
92}; 145};
93 146
94void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); 147void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
95void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam); 148void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
149void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
96 150
97static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr) 151static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
98{ 152{
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index d60a5f399022..9bda5bc78eda 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -282,6 +282,7 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
282 282
283 msleep(1); /* FIXME: optional? */ 283 msleep(1); /* FIXME: optional? */
284 284
285 /* TODO: use set_anaparam2 dev.c_func*/
285 /* anaparam2 on */ 286 /* anaparam2 on */
286 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 287 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
287 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); 288 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
@@ -730,32 +731,11 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
730 msleep(10); 731 msleep(10);
731} 732}
732 733
733static void rtl8225_rf_conf_erp(struct ieee80211_hw *dev,
734 struct ieee80211_bss_conf *info)
735{
736 struct rtl8180_priv *priv = dev->priv;
737
738 if (info->use_short_slot) {
739 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
740 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
741 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
742 rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
743 rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
744 } else {
745 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
746 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x44);
747 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
748 rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
749 rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
750 }
751}
752
753static const struct rtl818x_rf_ops rtl8225_ops = { 734static const struct rtl818x_rf_ops rtl8225_ops = {
754 .name = "rtl8225", 735 .name = "rtl8225",
755 .init = rtl8225_rf_init, 736 .init = rtl8225_rf_init,
756 .stop = rtl8225_rf_stop, 737 .stop = rtl8225_rf_stop,
757 .set_chan = rtl8225_rf_set_channel, 738 .set_chan = rtl8225_rf_set_channel,
758 .conf_erp = rtl8225_rf_conf_erp,
759}; 739};
760 740
761static const struct rtl818x_rf_ops rtl8225z2_ops = { 741static const struct rtl818x_rf_ops rtl8225z2_ops = {
@@ -763,7 +743,6 @@ static const struct rtl818x_rf_ops rtl8225z2_ops = {
763 .init = rtl8225z2_rf_init, 743 .init = rtl8225z2_rf_init,
764 .stop = rtl8225_rf_stop, 744 .stop = rtl8225_rf_stop,
765 .set_chan = rtl8225_rf_set_channel, 745 .set_chan = rtl8225_rf_set_channel,
766 .conf_erp = rtl8225_rf_conf_erp,
767}; 746};
768 747
769const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev) 748const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
new file mode 100644
index 000000000000..fde89866fa8d
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
@@ -0,0 +1,475 @@
1
2/* Radio tuning for RTL8225 on RTL8187SE
3 *
4 * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
5 * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
6 *
7 * Based on the r8180 and Realtek r8187se drivers, which are:
8 * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 *
10 * Also based on the rtl8187 driver, which is:
11 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
12 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <net/mac80211.h>
20
21#include "rtl8180.h"
22#include "rtl8225se.h"
23
24#define PFX "rtl8225 (se) "
25
26static const u32 RF_GAIN_TABLE[] = {
27 0x0096, 0x0076, 0x0056, 0x0036, 0x0016, 0x01f6, 0x01d6, 0x01b6,
28 0x0196, 0x0176, 0x00F7, 0x00D7, 0x00B7, 0x0097, 0x0077, 0x0057,
29 0x0037, 0x00FB, 0x00DB, 0x00BB, 0x00FF, 0x00E3, 0x00C3, 0x00A3,
30 0x0083, 0x0063, 0x0043, 0x0023, 0x0003, 0x01E3, 0x01C3, 0x01A3,
31 0x0183, 0x0163, 0x0143, 0x0123, 0x0103
32};
33
34static const u8 cck_ofdm_gain_settings[] = {
35 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
36 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
37 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
38 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
39 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
40 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
41};
42
43static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
44 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
45};
46
47static const u8 rtl8225se_tx_power_cck[] = {
48 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
49 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
50 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
51 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
52 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
53 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
54};
55
56static const u8 rtl8225se_tx_power_cck_ch14[] = {
57 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
58 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
59 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
60 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
61 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
62 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
63};
64
65static const u8 rtl8225se_tx_power_ofdm[] = {
66 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
67};
68
69static const u32 rtl8225se_chan[] = {
70 0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
71 0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
72};
73
74static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
75 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
76};
77
78static const u8 rtl8225sez2_tx_power_cck_B[] = {
79 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
80};
81
82static const u8 rtl8225sez2_tx_power_cck_A[] = {
83 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
84};
85
86static const u8 rtl8225sez2_tx_power_cck[] = {
87 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
88};
89
90static const u8 ZEBRA_AGC[] = {
91 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
92 0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
93 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A,
94 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62,
95 0x48, 0x47, 0x46, 0x45, 0x44, 0x29, 0x28, 0x27,
96 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
97 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
99 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
100 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16,
101 0x17, 0x17, 0x18, 0x18, 0x19, 0x1a, 0x1a, 0x1b,
102 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
103 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21,
104 0x21, 0x21, 0x22, 0x22, 0x22, 0x23, 0x23, 0x24,
105 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
106 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
107};
108
109static const u8 OFDM_CONFIG[] = {
110 0x10, 0x0F, 0x0A, 0x0C, 0x14, 0xFA, 0xFF, 0x50,
111 0x00, 0x50, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00,
112 0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xA8, 0x26,
113 0x32, 0x33, 0x06, 0xA5, 0x6F, 0x55, 0xC8, 0xBB,
114 0x0A, 0xE1, 0x2C, 0x4A, 0x86, 0x83, 0x34, 0x00,
115 0x4F, 0x24, 0x6F, 0xC2, 0x03, 0x40, 0x80, 0x00,
116 0xC0, 0xC1, 0x58, 0xF1, 0x00, 0xC4, 0x90, 0x3e,
117 0xD8, 0x3C, 0x7B, 0x10, 0x10
118};
119
/* Perform one transaction on the RTL8187SE software "three wire" RF bus.
 *
 * @data:  for a write, holds the word to send; for a read, receives the
 *         12-bit result in its first two bytes.  It is accessed through
 *         u16/u32 casts, so callers must pass suitably aligned storage
 *         (current callers pass the address of a u32).
 * @len:   transfer width in bits; only 16 and 64 are implemented for
 *         writes, reads always transfer 16 bits.
 * @write: true to write, false to read.
 */
static void rtl8187se_three_wire_io(struct ieee80211_hw *dev, u8 *data,
				u8 len, bool write)
{
	struct rtl8180_priv *priv = dev->priv;
	int i;
	u8 tmp;

	/* Executed once; the do/while(0) merely brackets the sequence. */
	do {
		/* Wait up to 50 us for a previous RE/WE cycle to finish. */
		for (i = 0; i < 5; i++) {
			tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
			if (!(tmp & 0x3))
				break;
			udelay(10);
		}
		if (i == 5)
			wiphy_err(dev->wiphy, PFX
				"CmdReg: 0x%x RE/WE bits aren't clear\n", tmp);

		/* Route RF access through the software three-wire engine. */
		tmp = rtl818x_ioread8(priv, &priv->map->rf_sw_config) | 0x02;
		rtl818x_iowrite8(priv, &priv->map->rf_sw_config, tmp);

		tmp = rtl818x_ioread8(priv, REG_ADDR1(0x84)) & 0xF7;
		rtl818x_iowrite8(priv, REG_ADDR1(0x84), tmp);
		if (write) {
			/* Stage the outgoing word in the data latches. */
			if (len == 16) {
				rtl818x_iowrite16(priv, SW_3W_DB0,
					*(u16 *)data);
			} else if (len == 64) {
				rtl818x_iowrite32(priv, SW_3W_DB0_4,
					*((u32 *)data));
				rtl818x_iowrite32(priv, SW_3W_DB1_4,
					*((u32 *)(data + 4)));
			} else
				wiphy_err(dev->wiphy, PFX
					"Unimplemented length\n");
		} else {
			rtl818x_iowrite16(priv, SW_3W_DB0, *(u16 *)data);
		}
		/* Trigger the cycle: bit 1 = write enable, bit 0 = read. */
		if (write)
			tmp = 2;
		else
			tmp = 1;
		rtl818x_iowrite8(priv, SW_3W_CMD1, tmp);
		/* Wait for completion; a timeout here is silently ignored. */
		for (i = 0; i < 5; i++) {
			tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
			if (!(tmp & 0x3))
				break;
			udelay(10);
		}
		rtl818x_iowrite8(priv, SW_3W_CMD1, 0);
		if (!write) {
			/* RF registers are 12 bits wide; mask the rest off. */
			*((u16 *)data) = rtl818x_ioread16(priv, SI_DATA_REG);
			*((u16 *)data) &= 0x0FFF;
		}
	} while (0);
}
176
177static u32 rtl8187se_rf_readreg(struct ieee80211_hw *dev, u8 addr)
178{
179 u32 dataread = addr & 0x0F;
180 rtl8187se_three_wire_io(dev, (u8 *)&dataread, 16, 0);
181 return dataread;
182}
183
184static void rtl8187se_rf_writereg(struct ieee80211_hw *dev, u8 addr, u32 data)
185{
186 u32 outdata = (data << 4) | (u32)(addr & 0x0F);
187 rtl8187se_three_wire_io(dev, (u8 *)&outdata, 16, 1);
188}
189
190
191static void rtl8225se_write_zebra_agc(struct ieee80211_hw *dev)
192{
193 int i;
194
195 for (i = 0; i < 128; i++) {
196 rtl8225se_write_phy_ofdm(dev, 0xF, ZEBRA_AGC[i]);
197 rtl8225se_write_phy_ofdm(dev, 0xE, i+0x80);
198 rtl8225se_write_phy_ofdm(dev, 0xE, 0);
199 }
200}
201
/* Load entry i of OFDM_CONFIG into OFDM PHY register i. */
static void rtl8187se_write_ofdm_config(struct ieee80211_hw *dev)
{
	/* write OFDM_CONFIG table
	 * NOTE(review): the table contains 61 bytes but only the first
	 * 60 are written here -- confirm whether the final entry is
	 * deliberately skipped or the bound is off by one.
	 */
	int i;

	for (i = 0; i < 60; i++)
		rtl8225se_write_phy_ofdm(dev, i, OFDM_CONFIG[i]);

}
211
/* Program per-channel CCK and OFDM TX gain (RTL8225SE "z2" variant).
 *
 * @channel: 1-based channel number.  Power levels come from the
 *           hw_value cached in priv->channels[]: low byte is the CCK
 *           level, high byte the OFDM level, each clamped to index 35
 *           of cck_ofdm_gain_settings[].
 */
static void rtl8225sez2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
{
	struct rtl8180_priv *priv = dev->priv;
	u8 cck_power, ofdm_power;

	cck_power = priv->channels[channel - 1].hw_value & 0xFF;
	if (cck_power > 35)
		cck_power = 35;
	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
			 cck_ofdm_gain_settings[cck_power]);

	usleep_range(1000, 5000);
	ofdm_power = priv->channels[channel - 1].hw_value >> 8;
	if (ofdm_power > 35)
		ofdm_power = 35;

	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
			 cck_ofdm_gain_settings[ofdm_power]);
	/* NOTE(review): when ofdm_power < 12 this branch runs but its
	 * writes are immediately overridden by the ofdm_power < 18
	 * branch below -- looks like a missing "else".  Left as-is
	 * because changing it alters the register write sequence on
	 * real hardware; confirm against the vendor code first.
	 */
	if (ofdm_power < 12) {
		rtl8225se_write_phy_ofdm(dev, 7, 0x5C);
		rtl8225se_write_phy_ofdm(dev, 9, 0x5C);
	}
	if (ofdm_power < 18) {
		rtl8225se_write_phy_ofdm(dev, 7, 0x54);
		rtl8225se_write_phy_ofdm(dev, 9, 0x54);
	} else {
		rtl8225se_write_phy_ofdm(dev, 7, 0x50);
		rtl8225se_write_phy_ofdm(dev, 9, 0x50);
	}

	usleep_range(1000, 5000);
}
244
245static void rtl8187se_write_rf_gain(struct ieee80211_hw *dev)
246{
247 int i;
248
249 for (i = 0; i <= 36; i++) {
250 rtl8187se_rf_writereg(dev, 0x01, i); mdelay(1);
251 rtl8187se_rf_writereg(dev, 0x02, RF_GAIN_TABLE[i]); mdelay(1);
252 }
253}
254
255static void rtl8187se_write_initial_gain(struct ieee80211_hw *dev,
256 int init_gain)
257{
258 switch (init_gain) {
259 default:
260 rtl8225se_write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
261 rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
262 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
263 break;
264 case 2:
265 rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
266 rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
267 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
268 break;
269 case 3:
270 rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
271 rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
272 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
273 break;
274 case 4:
275 rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
276 rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
277 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
278 break;
279 case 5:
280 rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
281 rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
282 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
283 break;
284 case 6:
285 rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
286 rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
287 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
288 break;
289 case 7:
290 rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
291 rtl8225se_write_phy_ofdm(dev, 0x24, 0xA6); mdelay(1);
292 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
293 break;
294 case 8:
295 rtl8225se_write_phy_ofdm(dev, 0x17, 0x66); mdelay(1);
296 rtl8225se_write_phy_ofdm(dev, 0x24, 0xB6); mdelay(1);
297 rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
298 break;
299 }
300}
301
/* One-time bring-up of the RTL8225-SE radio and baseband.
 *
 * Replays the vendor register-programming sequence: detect the silicon
 * cut from RF regs 0x08/0x09, program RF pages 0 and 1, load the gain
 * tables, apply (optional) crystal calibration, configure the CCK and
 * OFDM PHYs and finally switch the RF and baseband on.
 */
void rtl8225se_rf_init(struct ieee80211_hw *dev)
{
	struct rtl8180_priv *priv = dev->priv;
	u32 rf23, rf24;
	u8 d_cut = 0;
	u8 tmp;

	/* Page 1 */
	rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
	rf23 = rtl8187se_rf_readreg(dev, 0x08); mdelay(1);
	rf24 = rtl8187se_rf_readreg(dev, 0x09); mdelay(1);
	/* This pair of register values identifies the "D" cut. */
	if (rf23 == 0x0818 && rf24 == 0x070C)
		d_cut = 1;

	wiphy_info(dev->wiphy, "RTL8225-SE version %s\n",
		d_cut ? "D" : "not-D");

	/* Page 0: reg 0 - 15 */
	rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x01, 0x06E0); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x03, 0x07F1); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x05, 0x0C72); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x06, 0x0AE6); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x07, 0x00CA); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x08, 0x0E1C); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x09, 0x02F0); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0A, 0x09D0); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0B, 0x01BA); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0C, 0x0640); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0E, 0x0020); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0F, 0x0990); mdelay(1);
	/* page 1: reg 16-30 */
	rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x03, 0x0806); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x04, 0x03A7); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x05, 0x059B); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x06, 0x0081); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x07, 0x01A0); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0B, 0x0418); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0C, 0x0FBE); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(1);
	/* Register 0x0E differs between the D cut and earlier silicon. */
	if (d_cut)
		rtl8187se_rf_writereg(dev, 0x0E, 0x0807);
	else
		rtl8187se_rf_writereg(dev, 0x0E, 0x0806);
	mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x00, 0x01D7); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x03, 0x0E00); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x04, 0x0E50); mdelay(1);

	rtl8187se_write_rf_gain(dev);

	rtl8187se_rf_writereg(dev, 0x05, 0x0203); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x06, 0x0200); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
	rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(11);
	rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
	rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
	rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
	rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
	rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
	rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x07, 0x0220); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x07, 0x03E0); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x06, 0x00C1); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
	if (priv->xtal_cal) {
		/* NOTE(review): tmp is u8, so (1 << 11) | (1 << 9) is
		 * truncated away before the write even though the write
		 * helper takes a u32 -- tmp probably should be wider.
		 * Left unchanged; verify against the vendor sequence.
		 */
		tmp = (priv->xtal_in << 4) | (priv->xtal_out << 1) |
			(1 << 11) | (1 << 9);
		rtl8187se_rf_writereg(dev, 0x0F, tmp);
		wiphy_info(dev->wiphy, "Xtal cal\n");
		mdelay(1);
	} else {
		wiphy_info(dev->wiphy, "NO Xtal cal\n");
		rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC);
		mdelay(1);
	}
	/* page 0 */
	rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
	rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);

	rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x01, 0x0000); mdelay(1);
	rtl8187se_rf_writereg(dev, 0x02, 0x0000); mdelay(1);
	/* power save parameters */
	/* TODO: move to dev.c */
	rtl818x_iowrite8(priv, REG_ADDR1(0x024E),
		rtl818x_ioread8(priv, REG_ADDR1(0x24E)) & 0x9F);
	/* CCK PHY setup */
	rtl8225se_write_phy_cck(dev, 0x00, 0xC8);
	rtl8225se_write_phy_cck(dev, 0x06, 0x1C);
	rtl8225se_write_phy_cck(dev, 0x10, 0x78);
	rtl8225se_write_phy_cck(dev, 0x2E, 0xD0);
	rtl8225se_write_phy_cck(dev, 0x2F, 0x06);
	rtl8225se_write_phy_cck(dev, 0x01, 0x46);

	/* power control */
	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x10);
	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x1B);

	rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
	rtl8225se_write_phy_ofdm(dev, 0x00, 0x12);

	rtl8225se_write_zebra_agc(dev);

	rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);

	rtl8187se_write_ofdm_config(dev);

	/* turn on RF */
	rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
	rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
	/* turn on RF again */
	rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
	rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
	/* turn on BB */
	rtl8225se_write_phy_ofdm(dev, 0x10, 0x40);
	rtl8225se_write_phy_ofdm(dev, 0x12, 0x40);

	rtl8187se_write_initial_gain(dev, 4);
}
432
/* Power the radio down: mute the baseband RX path first, switch the RF
 * section off, then gate the analog converters via the ANAPARAM regs.
 */
void rtl8225se_rf_stop(struct ieee80211_hw *dev)
{
	/* checked for 8187se */
	struct rtl8180_priv *priv = dev->priv;

	/* turn off BB RXIQ matrix to cut off rx signal */
	rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
	rtl8225se_write_phy_ofdm(dev, 0x12, 0x00);
	/* turn off RF */
	rtl8187se_rf_writereg(dev, 0x04, 0x0000);
	rtl8187se_rf_writereg(dev, 0x00, 0x0000);

	usleep_range(1000, 5000);
	/* turn off A/D and D/A */
	rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_OFF);
	rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_OFF);
}
450
451void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
452 struct ieee80211_conf *conf)
453{
454 int chan =
455 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
456
457 rtl8225sez2_rf_set_tx_power(dev, chan);
458 rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
459 if ((rtl8187se_rf_readreg(dev, 0x7) & 0x0F80) !=
460 rtl8225se_chan[chan - 1])
461 rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
462 usleep_range(10000, 20000);
463}
464
/* RF operations vtable for the RTL8225-SE, returned by
 * rtl8187se_detect_rf().
 */
static const struct rtl818x_rf_ops rtl8225se_ops = {
	.name = "rtl8225-se",
	.init = rtl8225se_rf_init,
	.stop = rtl8225se_rf_stop,
	.set_chan = rtl8225se_rf_set_channel,
};
471
/* Only one radio type is handled here, so "detection" unconditionally
 * returns the RTL8225-SE ops; no hardware is probed.
 */
const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *dev)
{
	return &rtl8225se_ops;
}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
new file mode 100644
index 000000000000..229400264088
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
@@ -0,0 +1,61 @@
1
2/* Definitions for RTL8187SE hardware
3 *
4 * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
5 * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
6 *
7 * Based on the r8180 and Realtek r8187se drivers, which are:
8 * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 *
10 * Also based on the rtl8187 driver, which is:
11 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
12 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#ifndef RTL8187SE_RTL8225_H
20#define RTL8187SE_RTL8225_H
21
22#define RTL8225SE_ANAPARAM_ON 0xb0054d00
23#define RTL8225SE_ANAPARAM2_ON 0x000004c6
24
25/* all off except PLL */
26#define RTL8225SE_ANAPARAM_OFF 0xb0054dec
27/* all on including PLL */
28#define RTL8225SE_ANAPARAM_OFF2 0xb0054dfc
29
30#define RTL8225SE_ANAPARAM2_OFF 0x00ff04c6
31
32#define RTL8225SE_ANAPARAM3 0x10
33
34enum rtl8187se_power_state {
35 RTL8187SE_POWER_ON,
36 RTL8187SE_POWER_OFF,
37 RTL8187SE_POWER_SLEEP
38};
39
40static inline void rtl8225se_write_phy_ofdm(struct ieee80211_hw *dev,
41 u8 addr, u8 data)
42{
43 rtl8180_write_phy(dev, addr, data);
44}
45
46static inline void rtl8225se_write_phy_cck(struct ieee80211_hw *dev,
47 u8 addr, u8 data)
48{
49 rtl8180_write_phy(dev, addr, data | 0x10000);
50}
51
52
53const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *);
54void rtl8225se_rf_stop(struct ieee80211_hw *dev);
55void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
56 struct ieee80211_conf *conf);
57void rtl8225se_rf_conf_erp(struct ieee80211_hw *dev,
58 struct ieee80211_bss_conf *info);
59void rtl8225se_rf_init(struct ieee80211_hw *dev);
60
61#endif /* RTL8187SE_RTL8225_H */
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index fd78df813a85..0ca17cda48fa 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -592,7 +592,7 @@ static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
592 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam); 592 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
593 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2); 593 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
594 if (priv->is_rtl8187b) 594 if (priv->is_rtl8187b)
595 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3); 595 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3A, anaparam3);
596 reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE; 596 reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
597 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg); 597 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
598 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 598 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -785,7 +785,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
785 rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF); 785 rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
786 786
787 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); 787 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
788 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; 788 reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
789 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); 789 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
790 790
791 /* Auto Rate Fallback Register (ARFR): 1M-54M setting */ 791 /* Auto Rate Fallback Register (ARFR): 1M-54M setting */
@@ -943,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
943 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); 943 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
944 944
945 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); 945 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
946 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT; 946 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
947 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT; 947 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
948 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT; 948 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
949 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); 949 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
950 950
@@ -986,13 +986,13 @@ static int rtl8187_start(struct ieee80211_hw *dev)
986 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); 986 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
987 987
988 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); 988 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
989 reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT; 989 reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
990 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; 990 reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
991 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); 991 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
992 992
993 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); 993 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
994 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT; 994 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
995 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT; 995 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
996 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT; 996 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
997 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); 997 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
998 998
@@ -1636,10 +1636,10 @@ static int rtl8187_probe(struct usb_interface *intf,
1636 1636
1637 err_free_dmabuf: 1637 err_free_dmabuf:
1638 kfree(priv->io_dmabuf); 1638 kfree(priv->io_dmabuf);
1639 err_free_dev:
1640 ieee80211_free_hw(dev);
1641 usb_set_intfdata(intf, NULL); 1639 usb_set_intfdata(intf, NULL);
1642 usb_put_dev(udev); 1640 usb_put_dev(udev);
1641 err_free_dev:
1642 ieee80211_free_hw(dev);
1643 return err; 1643 return err;
1644} 1644}
1645 1645
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index ce23dfd42381..45ea4e1c4abe 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -16,30 +16,82 @@
16#define RTL818X_H 16#define RTL818X_H
17 17
18struct rtl818x_csr { 18struct rtl818x_csr {
19
19 u8 MAC[6]; 20 u8 MAC[6];
20 u8 reserved_0[2]; 21 u8 reserved_0[2];
21 __le32 MAR[2]; 22
22 u8 RX_FIFO_COUNT; 23 union {
23 u8 reserved_1; 24 __le32 MAR[2]; /* 0x8 */
24 u8 TX_FIFO_COUNT; 25
25 u8 BQREQ; 26 struct{ /* rtl8187se */
26 u8 reserved_2[4]; 27 u8 rf_sw_config; /* 0x8 */
28 u8 reserved_01[3];
29 __le32 TMGDA; /* 0xc */
30 } __packed;
31 } __packed;
32
33 union { /* 0x10 */
34 struct {
35 u8 RX_FIFO_COUNT;
36 u8 reserved_1;
37 u8 TX_FIFO_COUNT;
38 u8 BQREQ;
39 } __packed;
40
41 __le32 TBKDA; /* for 8187se */
42 } __packed;
43
44 __le32 TBEDA; /* 0x14 - for rtl8187se */
45
27 __le32 TSFT[2]; 46 __le32 TSFT[2];
28 __le32 TLPDA; 47
29 __le32 TNPDA; 48 union { /* 0x20 */
30 __le32 THPDA; 49 __le32 TLPDA;
31 __le16 BRSR; 50 __le32 TVIDA; /* for 8187se */
32 u8 BSSID[6]; 51 } __packed;
33 u8 RESP_RATE; 52
34 u8 EIFS; 53 union { /* 0x24 */
35 u8 reserved_3[1]; 54 __le32 TNPDA;
36 u8 CMD; 55 __le32 TVODA; /* for 8187se */
56 } __packed;
57
58 /* hi pri ring for all cards */
59 __le32 THPDA; /* 0x28 */
60
61 union { /* 0x2c */
62 struct {
63 u8 reserved_2a;
64 u8 EIFS_8187SE;
65 } __packed;
66
67 __le16 BRSR;
68 } __packed;
69
70 u8 BSSID[6]; /* 0x2e */
71
72 union { /* 0x34 */
73 struct {
74 u8 RESP_RATE;
75 u8 EIFS;
76 } __packed;
77 __le16 BRSR_8187SE;
78 } __packed;
79
80 u8 reserved_3[1]; /* 0x36 */
81 u8 CMD; /* 0x37 */
37#define RTL818X_CMD_TX_ENABLE (1 << 2) 82#define RTL818X_CMD_TX_ENABLE (1 << 2)
38#define RTL818X_CMD_RX_ENABLE (1 << 3) 83#define RTL818X_CMD_RX_ENABLE (1 << 3)
39#define RTL818X_CMD_RESET (1 << 4) 84#define RTL818X_CMD_RESET (1 << 4)
40 u8 reserved_4[4]; 85 u8 reserved_4[4]; /* 0x38 */
41 __le16 INT_MASK; 86 union {
42 __le16 INT_STATUS; 87 struct {
88 __le16 INT_MASK;
89 __le16 INT_STATUS;
90 } __packed;
91
92 __le32 INT_STATUS_SE; /* 0x3c */
93 } __packed;
94/* status bits for rtl8187 and rtl8180/8185 */
43#define RTL818X_INT_RX_OK (1 << 0) 95#define RTL818X_INT_RX_OK (1 << 0)
44#define RTL818X_INT_RX_ERR (1 << 1) 96#define RTL818X_INT_RX_ERR (1 << 1)
45#define RTL818X_INT_TXL_OK (1 << 2) 97#define RTL818X_INT_TXL_OK (1 << 2)
@@ -56,7 +108,34 @@ struct rtl818x_csr {
56#define RTL818X_INT_BEACON (1 << 13) 108#define RTL818X_INT_BEACON (1 << 13)
57#define RTL818X_INT_TIME_OUT (1 << 14) 109#define RTL818X_INT_TIME_OUT (1 << 14)
58#define RTL818X_INT_TX_FO (1 << 15) 110#define RTL818X_INT_TX_FO (1 << 15)
59 __le32 TX_CONF; 111/* status bits for rtl8187se */
112#define RTL818X_INT_SE_TIMER3 (1 << 0)
113#define RTL818X_INT_SE_TIMER2 (1 << 1)
114#define RTL818X_INT_SE_RQ0SOR (1 << 2)
115#define RTL818X_INT_SE_TXBED_OK (1 << 3)
116#define RTL818X_INT_SE_TXBED_ERR (1 << 4)
117#define RTL818X_INT_SE_TXBE_OK (1 << 5)
118#define RTL818X_INT_SE_TXBE_ERR (1 << 6)
119#define RTL818X_INT_SE_RX_OK (1 << 7)
120#define RTL818X_INT_SE_RX_ERR (1 << 8)
121#define RTL818X_INT_SE_TXL_OK (1 << 9)
122#define RTL818X_INT_SE_TXL_ERR (1 << 10)
123#define RTL818X_INT_SE_RX_DU (1 << 11)
124#define RTL818X_INT_SE_RX_FIFO (1 << 12)
125#define RTL818X_INT_SE_TXN_OK (1 << 13)
126#define RTL818X_INT_SE_TXN_ERR (1 << 14)
127#define RTL818X_INT_SE_TXH_OK (1 << 15)
128#define RTL818X_INT_SE_TXH_ERR (1 << 16)
129#define RTL818X_INT_SE_TXB_OK (1 << 17)
130#define RTL818X_INT_SE_TXB_ERR (1 << 18)
131#define RTL818X_INT_SE_ATIM_TO (1 << 19)
132#define RTL818X_INT_SE_BK_TO (1 << 20)
133#define RTL818X_INT_SE_TIMER1 (1 << 21)
134#define RTL818X_INT_SE_TX_FIFO (1 << 22)
135#define RTL818X_INT_SE_WAKEUP (1 << 23)
136#define RTL818X_INT_SE_BK_DMA (1 << 24)
137#define RTL818X_INT_SE_TMGD_OK (1 << 30)
138 __le32 TX_CONF; /* 0x40 */
60#define RTL818X_TX_CONF_LOOPBACK_MAC (1 << 17) 139#define RTL818X_TX_CONF_LOOPBACK_MAC (1 << 17)
61#define RTL818X_TX_CONF_LOOPBACK_CONT (3 << 17) 140#define RTL818X_TX_CONF_LOOPBACK_CONT (3 << 17)
62#define RTL818X_TX_CONF_NO_ICV (1 << 19) 141#define RTL818X_TX_CONF_NO_ICV (1 << 19)
@@ -68,6 +147,7 @@ struct rtl818x_csr {
68#define RTL818X_TX_CONF_R8185_D (5 << 25) 147#define RTL818X_TX_CONF_R8185_D (5 << 25)
69#define RTL818X_TX_CONF_R8187vD (5 << 25) 148#define RTL818X_TX_CONF_R8187vD (5 << 25)
70#define RTL818X_TX_CONF_R8187vD_B (6 << 25) 149#define RTL818X_TX_CONF_R8187vD_B (6 << 25)
150#define RTL818X_TX_CONF_RTL8187SE (6 << 25)
71#define RTL818X_TX_CONF_HWVER_MASK (7 << 25) 151#define RTL818X_TX_CONF_HWVER_MASK (7 << 25)
72#define RTL818X_TX_CONF_DISREQQSIZE (1 << 28) 152#define RTL818X_TX_CONF_DISREQQSIZE (1 << 28)
73#define RTL818X_TX_CONF_PROBE_DTS (1 << 29) 153#define RTL818X_TX_CONF_PROBE_DTS (1 << 29)
@@ -122,31 +202,67 @@ struct rtl818x_csr {
122 u8 PGSELECT; 202 u8 PGSELECT;
123 u8 SECURITY; 203 u8 SECURITY;
124 __le32 ANAPARAM2; 204 __le32 ANAPARAM2;
125 u8 reserved_10[12]; 205 u8 reserved_10[8];
126 __le16 BEACON_INTERVAL; 206 __le32 IMR; /* 0x6c - Interrupt mask reg for 8187se */
127 __le16 ATIM_WND; 207#define IMR_TMGDOK ((1 << 30))
128 __le16 BEACON_INTERVAL_TIME; 208#define IMR_DOT11HINT ((1 << 25)) /* 802.11h Measurement Interrupt */
129 __le16 ATIMTR_INTERVAL; 209#define IMR_BCNDMAINT ((1 << 24)) /* Beacon DMA Interrupt */
130 u8 PHY_DELAY; 210#define IMR_WAKEINT ((1 << 23)) /* Wake Up Interrupt */
131 u8 CARRIER_SENSE_COUNTER; 211#define IMR_TXFOVW ((1 << 22)) /* Tx FIFO Overflow */
132 u8 reserved_11[2]; 212#define IMR_TIMEOUT1 ((1 << 21)) /* Time Out Interrupt 1 */
133 u8 PHY[4]; 213#define IMR_BCNINT ((1 << 20)) /* Beacon Time out */
134 __le16 RFPinsOutput; 214#define IMR_ATIMINT ((1 << 19)) /* ATIM Time Out */
135 __le16 RFPinsEnable; 215#define IMR_TBDER ((1 << 18)) /* Tx Beacon Descriptor Error */
136 __le16 RFPinsSelect; 216#define IMR_TBDOK ((1 << 17)) /* Tx Beacon Descriptor OK */
137 __le16 RFPinsInput; 217#define IMR_THPDER ((1 << 16)) /* Tx High Priority Descriptor Error */
138 __le32 RF_PARA; 218#define IMR_THPDOK ((1 << 15)) /* Tx High Priority Descriptor OK */
139 __le32 RF_TIMING; 219#define IMR_TVODER ((1 << 14)) /* Tx AC_VO Descriptor Error Int */
140 u8 GP_ENABLE; 220#define IMR_TVODOK ((1 << 13)) /* Tx AC_VO Descriptor OK Interrupt */
141 u8 GPIO0; 221#define IMR_FOVW ((1 << 12)) /* Rx FIFO Overflow Interrupt */
142 u8 GPIO1; 222#define IMR_RDU ((1 << 11)) /* Rx Descriptor Unavailable */
143 u8 reserved_12; 223#define IMR_TVIDER ((1 << 10)) /* Tx AC_VI Descriptor Error */
144 __le32 HSSI_PARA; 224#define IMR_TVIDOK ((1 << 9)) /* Tx AC_VI Descriptor OK Interrupt */
145 u8 reserved_13[4]; 225#define IMR_RER ((1 << 8)) /* Rx Error Interrupt */
146 u8 TX_AGC_CTL; 226#define IMR_ROK ((1 << 7)) /* Receive OK Interrupt */
147#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0) 227#define IMR_TBEDER ((1 << 6)) /* Tx AC_BE Descriptor Error */
148#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1) 228#define IMR_TBEDOK ((1 << 5)) /* Tx AC_BE Descriptor OK */
149#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2) 229#define IMR_TBKDER ((1 << 4)) /* Tx AC_BK Descriptor Error */
230#define IMR_TBKDOK ((1 << 3)) /* Tx AC_BK Descriptor OK */
231#define IMR_RQOSOK ((1 << 2)) /* Rx QoS OK Interrupt */
232#define IMR_TIMEOUT2 ((1 << 1)) /* Time Out Interrupt 2 */
233#define IMR_TIMEOUT3 ((1 << 0)) /* Time Out Interrupt 3 */
234 __le16 BEACON_INTERVAL; /* 0x70 */
235 __le16 ATIM_WND; /* 0x72 */
236 __le16 BEACON_INTERVAL_TIME; /* 0x74 */
237 __le16 ATIMTR_INTERVAL; /* 0x76 */
238 u8 PHY_DELAY; /* 0x78 */
239 u8 CARRIER_SENSE_COUNTER; /* 0x79 */
240 u8 reserved_11[2]; /* 0x7a */
241 u8 PHY[4]; /* 0x7c */
242 __le16 RFPinsOutput; /* 0x80 */
243 __le16 RFPinsEnable; /* 0x82 */
244 __le16 RFPinsSelect; /* 0x84 */
245 __le16 RFPinsInput; /* 0x86 */
246 __le32 RF_PARA; /* 0x88 */
247 __le32 RF_TIMING; /* 0x8c */
248 u8 GP_ENABLE; /* 0x90 */
249 u8 GPIO0; /* 0x91 */
250 u8 GPIO1; /* 0x92 */
251 u8 TPPOLL_STOP; /* 0x93 - rtl8187se only */
252#define RTL818x_TPPOLL_STOP_BQ (1 << 7)
253#define RTL818x_TPPOLL_STOP_VI (1 << 4)
254#define RTL818x_TPPOLL_STOP_VO (1 << 5)
255#define RTL818x_TPPOLL_STOP_BE (1 << 3)
256#define RTL818x_TPPOLL_STOP_BK (1 << 2)
257#define RTL818x_TPPOLL_STOP_MG (1 << 1)
258#define RTL818x_TPPOLL_STOP_HI (1 << 6)
259
260 __le32 HSSI_PARA; /* 0x94 */
261 u8 reserved_13[4]; /* 0x98 */
262 u8 TX_AGC_CTL; /* 0x9c */
263#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN (1 << 0)
264#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL (1 << 1)
265#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
150 u8 TX_GAIN_CCK; 266 u8 TX_GAIN_CCK;
151 u8 TX_GAIN_OFDM; 267 u8 TX_GAIN_OFDM;
152 u8 TX_ANTENNA; 268 u8 TX_ANTENNA;
@@ -158,8 +274,8 @@ struct rtl818x_csr {
158 u8 SLOT; 274 u8 SLOT;
159 u8 reserved_16[5]; 275 u8 reserved_16[5];
160 u8 CW_CONF; 276 u8 CW_CONF;
161#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0) 277#define RTL818X_CW_CONF_PERPACKET_CW (1 << 0)
162#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1) 278#define RTL818X_CW_CONF_PERPACKET_RETRY (1 << 1)
163 u8 CW_VAL; 279 u8 CW_VAL;
164 u8 RATE_FALLBACK; 280 u8 RATE_FALLBACK;
165#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7) 281#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7)
@@ -167,7 +283,8 @@ struct rtl818x_csr {
167 u8 reserved_17[24]; 283 u8 reserved_17[24];
168 u8 CONFIG5; 284 u8 CONFIG5;
169 u8 TX_DMA_POLLING; 285 u8 TX_DMA_POLLING;
170 u8 reserved_18[2]; 286 u8 PHY_PR;
287 u8 reserved_18;
171 __le16 CWR; 288 __le16 CWR;
172 u8 RETRY_CTR; 289 u8 RETRY_CTR;
173 u8 reserved_19[3]; 290 u8 reserved_19[3];
@@ -179,20 +296,64 @@ struct rtl818x_csr {
179 __le32 RDSAR; 296 __le32 RDSAR;
180 __le16 TID_AC_MAP; 297 __le16 TID_AC_MAP;
181 u8 reserved_20[4]; 298 u8 reserved_20[4];
182 u8 ANAPARAM3; 299 union {
183 u8 reserved_21[5]; 300 __le16 ANAPARAM3; /* 0xee */
184 __le16 FEMR; 301 u8 ANAPARAM3A; /* for rtl8187 */
185 u8 reserved_22[4]; 302 };
186 __le16 TALLY_CNT; 303
187 u8 TALLY_SEL; 304#define AC_PARAM_TXOP_LIMIT_SHIFT 16
305#define AC_PARAM_ECW_MAX_SHIFT 12
306#define AC_PARAM_ECW_MIN_SHIFT 8
307#define AC_PARAM_AIFS_SHIFT 0
308
309 __le32 AC_VO_PARAM; /* 0xf0 */
310
311 union { /* 0xf4 */
312 __le32 AC_VI_PARAM;
313 __le16 FEMR;
314 } __packed;
315
316 union{ /* 0xf8 */
317 __le32 AC_BE_PARAM; /* rtl8187se */
318 struct{
319 u8 reserved_21[2];
320 __le16 TALLY_CNT; /* 0xfa */
321 } __packed;
322 } __packed;
323
324 union {
325 u8 TALLY_SEL; /* 0xfc */
326 __le32 AC_BK_PARAM;
327
328 } __packed;
329
188} __packed; 330} __packed;
189 331
332/* These are addresses with NON-standard usage.
333 * They have offsets very far from this struct.
334 * I don't like to introduce a ton of "reserved"..
335 * They are for RTL8187SE
336 */
337#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr)
338#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1))
339#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2))
340
341#define FEMR_SE REG_ADDR2(0x1D4)
342#define ARFR REG_ADDR2(0x1E0)
343#define RFSW_CTRL REG_ADDR2(0x272)
344#define SW_3W_DB0 REG_ADDR2(0x274)
345#define SW_3W_DB0_4 REG_ADDR4(0x274)
346#define SW_3W_DB1 REG_ADDR2(0x278)
347#define SW_3W_DB1_4 REG_ADDR4(0x278)
348#define SW_3W_CMD1 REG_ADDR1(0x27D)
349#define PI_DATA_REG REG_ADDR2(0x360)
350#define SI_DATA_REG REG_ADDR2(0x362)
351
190struct rtl818x_rf_ops { 352struct rtl818x_rf_ops {
191 char *name; 353 char *name;
192 void (*init)(struct ieee80211_hw *); 354 void (*init)(struct ieee80211_hw *);
193 void (*stop)(struct ieee80211_hw *); 355 void (*stop)(struct ieee80211_hw *);
194 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); 356 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
195 void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *);
196 u8 (*calc_rssi)(u8 agc, u8 sq); 357 u8 (*calc_rssi)(u8 agc, u8 sq);
197}; 358};
198 359
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index c2ffce7a907c..bf3cf124e4ea 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -5,7 +5,7 @@ menuconfig RTL_CARDS
5 ---help--- 5 ---help---
6 This option will enable support for the Realtek mac80211-based 6 This option will enable support for the Realtek mac80211-based
7 wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de, 7 wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
8 rtl8723eu, and rtl8188eu share some common code. 8 rtl8723ae, rtl8723be, and rtl8188ae share some common code.
9 9
10if RTL_CARDS 10if RTL_CARDS
11 11
@@ -48,12 +48,27 @@ config RTL8723AE
48 depends on PCI 48 depends on PCI
49 select RTLWIFI 49 select RTLWIFI
50 select RTLWIFI_PCI 50 select RTLWIFI_PCI
51 select RTL8723_COMMON
52 select RTLBTCOEXIST
51 ---help--- 53 ---help---
52 This is the driver for Realtek RTL8723AE 802.11n PCIe 54 This is the driver for Realtek RTL8723AE 802.11n PCIe
53 wireless network adapters. 55 wireless network adapters.
54 56
55 If you choose to build it as a module, it will be called rtl8723ae 57 If you choose to build it as a module, it will be called rtl8723ae
56 58
59config RTL8723BE
60 tristate "Realtek RTL8723BE PCIe Wireless Network Adapter"
61 depends on PCI
62 select RTLWIFI
63 select RTLWIFI_PCI
64 select RTL8723_COMMON
65 select RTLBTCOEXIST
66 ---help---
67 This is the driver for Realtek RTL8723BE 802.11n PCIe
68 wireless network adapters.
69
70 If you choose to build it as a module, it will be called rtl8723be
71
57config RTL8188EE 72config RTL8188EE
58 tristate "Realtek RTL8188EE Wireless Network Adapter" 73 tristate "Realtek RTL8188EE Wireless Network Adapter"
59 depends on PCI 74 depends on PCI
@@ -101,4 +116,14 @@ config RTL8192C_COMMON
101 depends on RTL8192CE || RTL8192CU 116 depends on RTL8192CE || RTL8192CU
102 default y 117 default y
103 118
119config RTL8723_COMMON
120 tristate
121 depends on RTL8723AE || RTL8723BE
122 default y
123
124config RTLBTCOEXIST
125 tristate
126 depends on RTL8723AE || RTL8723BE
127 default y
128
104endif 129endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index d56f023a4b90..bba36a06abcc 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -24,6 +24,9 @@ obj-$(CONFIG_RTL8192CU) += rtl8192cu/
24obj-$(CONFIG_RTL8192SE) += rtl8192se/ 24obj-$(CONFIG_RTL8192SE) += rtl8192se/
25obj-$(CONFIG_RTL8192DE) += rtl8192de/ 25obj-$(CONFIG_RTL8192DE) += rtl8192de/
26obj-$(CONFIG_RTL8723AE) += rtl8723ae/ 26obj-$(CONFIG_RTL8723AE) += rtl8723ae/
27obj-$(CONFIG_RTL8723BE) += rtl8723be/
27obj-$(CONFIG_RTL8188EE) += rtl8188ee/ 28obj-$(CONFIG_RTL8188EE) += rtl8188ee/
29obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/
30obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/
28 31
29ccflags-y += -D__CHECK_ENDIAN__ 32ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
new file mode 100644
index 000000000000..47ceecfcb7dc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
@@ -0,0 +1,7 @@
1btcoexist-objs := halbtc8723b2ant.o \
2 halbtcoutsrc.o \
3 rtl_btc.o
4
5obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
6
7ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
new file mode 100644
index 000000000000..d76684eb24d0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
@@ -0,0 +1,75 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 * Larry Finger <Larry.Finger@lwfinger.net>
22 *
23 ******************************************************************************/
24
#ifndef __HALBT_PRECOMP_H__
#define __HALBT_PRECOMP_H__
/*************************************************************
 * include files
 *
 * Pre-compiled header for the BT-coexistence code: pulls in the
 * rtlwifi core headers the coexistence algorithms need, plus the
 * per-chip coexistence headers.
 *************************************************************/
#include "../wifi.h"
#include "../efuse.h"
#include "../base.h"
#include "../regd.h"
#include "../cam.h"
#include "../ps.h"
#include "../pci.h"

#include "halbtcoutsrc.h"

#include "halbtc8723b2ant.h"

/*
 * Single-bit masks used by the coexistence code.
 * NOTE(review): these duplicate the kernel's generic BIT() macro from
 * <linux/bitops.h>; presumably kept as plain defines to match the
 * vendor sources — confirm they do not collide with other headers.
 */
#define BIT0	0x00000001
#define BIT1	0x00000002
#define BIT2	0x00000004
#define BIT3	0x00000008
#define BIT4	0x00000010
#define BIT5	0x00000020
#define BIT6	0x00000040
#define BIT7	0x00000080
#define BIT8	0x00000100
#define BIT9	0x00000200
#define BIT10	0x00000400
#define BIT11	0x00000800
#define BIT12	0x00001000
#define BIT13	0x00002000
#define BIT14	0x00004000
#define BIT15	0x00008000
#define BIT16	0x00010000
#define BIT17	0x00020000
#define BIT18	0x00040000
#define BIT19	0x00080000
#define BIT20	0x00100000
#define BIT21	0x00200000
#define BIT22	0x00400000
#define BIT23	0x00800000
#define BIT24	0x01000000
#define BIT25	0x02000000
#define BIT26	0x04000000
#define BIT27	0x08000000
#define BIT28	0x10000000
#define BIT29	0x20000000
#define BIT30	0x40000000
#define BIT31	0x80000000

#endif /* __HALBT_PRECOMP_H__ */
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
new file mode 100644
index 000000000000..d916ab9f3c38
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -0,0 +1,3698 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25/***************************************************************
26 * Description:
27 *
28 * This file is for RTL8723B Co-exist mechanism
29 *
30 * History
31 * 2012/11/15 Cosa first check in.
32 *
33 **************************************************************/
34/**************************************************************
35 * include files
36 **************************************************************/
37#include "halbt_precomp.h"
38/**************************************************************
39 * Global variables, these are static variables
40 **************************************************************/
41static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
42static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
43static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
44static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
45
46static const char *const glbt_info_src_8723b_2ant[] = {
47 "BT Info[wifi fw]",
48 "BT Info[bt rsp]",
49 "BT Info[bt auto report]",
50};
51
52static u32 glcoex_ver_date_8723b_2ant = 20130731;
53static u32 glcoex_ver_8723b_2ant = 0x3b;
54
55/**************************************************************
56 * local function proto type if needed
57 **************************************************************/
58/**************************************************************
59 * local function start with btc8723b2ant_
60 **************************************************************/
/*
 * Classify the current BT RSSI (coex_sta->bt_rssi) into a coarse state:
 * LOW/HIGH (level_num == 2) or LOW/MEDIUM/HIGH (level_num == 3), each with
 * a STAY_* variant when the state is unchanged.  Hysteresis is applied:
 * moving up out of a state requires the threshold plus
 * BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT, while moving down only requires
 * dropping below the bare threshold.
 *
 * @level_num:    2 or 3, number of RSSI bands.
 * @rssi_thresh:  boundary between LOW and the next band up.
 * @rssi_thresh1: boundary between MEDIUM and HIGH (3-level mode only);
 *                must be >= rssi_thresh or the previous state is returned.
 *
 * The new state is cached in coex_sta->pre_bt_rssi_state and returned.
 */
static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
				     u8 rssi_thresh1)
{
	s32 bt_rssi = 0;
	u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;

	bt_rssi = coex_sta->bt_rssi;

	if (level_num == 2) {
		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
			/* currently LOW: need threshold + tolerance to rise */
			if (bt_rssi >= rssi_thresh +
				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
				bt_rssi_state = BTC_RSSI_STATE_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "switch to High\n");
			} else {
				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "stay at Low\n");
			}
		} else {
			/* currently HIGH: fall back below the bare threshold */
			if (bt_rssi < rssi_thresh) {
				bt_rssi_state = BTC_RSSI_STATE_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "switch to Low\n");
			} else {
				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "stay at High\n");
			}
		}
	} else if (level_num == 3) {
		if (rssi_thresh > rssi_thresh1) {
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
				  "[BTCoex], BT Rssi thresh error!!\n");
			return coex_sta->pre_bt_rssi_state;
		}

		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
			if (bt_rssi >= rssi_thresh +
				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "switch to Medium\n");
			} else {
				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "stay at Low\n");
			}
		} else if ((coex_sta->pre_bt_rssi_state ==
			    BTC_RSSI_STATE_MEDIUM) ||
			   (coex_sta->pre_bt_rssi_state ==
			    BTC_RSSI_STATE_STAY_MEDIUM)) {
			/* MEDIUM can move up (with tolerance) or down */
			if (bt_rssi >= rssi_thresh1 +
				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
				bt_rssi_state = BTC_RSSI_STATE_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "switch to High\n");
			} else if (bt_rssi < rssi_thresh) {
				bt_rssi_state = BTC_RSSI_STATE_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "switch to Low\n");
			} else {
				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "stay at Medium\n");
			}
		} else {
			if (bt_rssi < rssi_thresh1) {
				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "switch to Medium\n");
			} else {
				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
					  "[BTCoex], BT Rssi state "
					  "stay at High\n");
			}
		}
	}

	coex_sta->pre_bt_rssi_state = bt_rssi_state;

	return bt_rssi_state;
}
158
/*
 * Same hysteresis state machine as btc8723b2ant_bt_rssi_state(), but for
 * the wifi RSSI obtained via the BTC_GET_S4_WIFI_RSSI callback.  The
 * previous state is kept per @index in coex_sta->pre_wifi_rssi_state[],
 * so several independent threshold pairs can be tracked at once.
 *
 * @index:        which cached state slot to use/update.
 * @level_num:    2 (LOW/HIGH) or 3 (LOW/MEDIUM/HIGH).
 * @rssi_thresh:  LOW boundary.
 * @rssi_thresh1: MEDIUM/HIGH boundary (3-level mode only).
 */
static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
				       u8 index, u8 level_num,
				       u8 rssi_thresh, u8 rssi_thresh1)
{
	s32 wifi_rssi = 0;
	u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];

	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);

	if (level_num == 2) {
		if ((coex_sta->pre_wifi_rssi_state[index] ==
		     BTC_RSSI_STATE_LOW) ||
		    (coex_sta->pre_wifi_rssi_state[index] ==
		     BTC_RSSI_STATE_STAY_LOW)) {
			/* rising edge requires the extra tolerance margin */
			if (wifi_rssi >= rssi_thresh +
					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "switch to High\n");
			} else {
				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "stay at Low\n");
			}
		} else {
			if (wifi_rssi < rssi_thresh) {
				wifi_rssi_state = BTC_RSSI_STATE_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "switch to Low\n");
			} else {
				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "stay at High\n");
			}
		}
	} else if (level_num == 3) {
		if (rssi_thresh > rssi_thresh1) {
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
				  "[BTCoex], wifi RSSI thresh error!!\n");
			return coex_sta->pre_wifi_rssi_state[index];
		}

		if ((coex_sta->pre_wifi_rssi_state[index] ==
		     BTC_RSSI_STATE_LOW) ||
		    (coex_sta->pre_wifi_rssi_state[index] ==
		     BTC_RSSI_STATE_STAY_LOW)) {
			if (wifi_rssi >= rssi_thresh +
					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "switch to Medium\n");
			} else {
				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "stay at Low\n");
			}
		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
			    BTC_RSSI_STATE_MEDIUM) ||
			   (coex_sta->pre_wifi_rssi_state[index] ==
			    BTC_RSSI_STATE_STAY_MEDIUM)) {
			if (wifi_rssi >= rssi_thresh1 +
					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "switch to High\n");
			} else if (wifi_rssi < rssi_thresh) {
				wifi_rssi_state = BTC_RSSI_STATE_LOW;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "switch to Low\n");
			} else {
				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "stay at Medium\n");
			}
		} else {
			if (wifi_rssi < rssi_thresh1) {
				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "switch to Medium\n");
			} else {
				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_WIFI_RSSI_STATE,
					  "[BTCoex], wifi RSSI state "
					  "stay at High\n");
			}
		}
	}

	coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;

	return wifi_rssi_state;
}
272
/*
 * Sample the BT high/low priority traffic counters and store them in
 * coex_sta.  Registers 0x770 (high priority) and 0x774 (low priority)
 * each pack a tx count in the low 16 bits and an rx count in the high
 * 16 bits.  The counters are reset after reading (write 0xc to 0x76e)
 * so each call observes the traffic of the last monitoring interval.
 */
static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
	u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
	u32 reg_hp_tx = 0, reg_hp_rx = 0;
	u32 reg_lp_tx = 0, reg_lp_rx = 0;

	reg_hp_txrx = 0x770;
	reg_lp_txrx = 0x774;

	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
	reg_hp_tx = u32tmp & MASKLWORD;
	reg_hp_rx = (u32tmp & MASKHWORD) >> 16;

	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
	reg_lp_tx = u32tmp & MASKLWORD;
	reg_lp_rx = (u32tmp & MASKHWORD) >> 16;

	coex_sta->high_priority_tx = reg_hp_tx;
	coex_sta->high_priority_rx = reg_hp_rx;
	coex_sta->low_priority_tx = reg_lp_tx;
	coex_sta->low_priority_rx = reg_lp_rx;

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
		  "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
		  "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);

	/* reset counter */
	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
}
305
/*
 * Return true if any of the monitored wifi conditions (busy state,
 * WPA 4-way handshake in progress, BT HS operation) changed since the
 * previous call while wifi is connected.  The previous values are kept
 * in function-static variables, which persist across calls.
 *
 * NOTE(review): the static locals mean this state is shared by every
 * caller — presumably only one btcoexist instance exists; confirm.
 */
static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
{
	static bool pre_wifi_busy;
	static bool pre_under_4way;
	static bool pre_bt_hs_on;
	bool wifi_busy = false, under_4way = false, bt_hs_on = false;
	bool wifi_connected = false;

	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
			   &wifi_connected);
	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
			   &under_4way);

	if (wifi_connected) {
		if (wifi_busy != pre_wifi_busy) {
			pre_wifi_busy = wifi_busy;
			return true;
		}

		if (under_4way != pre_under_4way) {
			pre_under_4way = under_4way;
			return true;
		}

		if (bt_hs_on != pre_bt_hs_on) {
			pre_bt_hs_on = bt_hs_on;
			return true;
		}
	}

	return false;
}
340
341static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
342{
343 /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
344 struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
345 bool bt_hs_on = false;
346
347#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
348 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
349
350 bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
351 bt_link_info->sco_exist = coex_sta->sco_exist;
352 bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
353 bt_link_info->pan_exist = coex_sta->pan_exist;
354 bt_link_info->hid_exist = coex_sta->hid_exist;
355
356 /* work around for HS mode. */
357 if (bt_hs_on) {
358 bt_link_info->pan_exist = true;
359 bt_link_info->bt_link_exist = true;
360 }
361#else /* profile from bt stack */
362 bt_link_info->bt_link_exist = stack_info->bt_link_exist;
363 bt_link_info->sco_exist = stack_info->sco_exist;
364 bt_link_info->a2dp_exist = stack_info->a2dp_exist;
365 bt_link_info->pan_exist = stack_info->pan_exist;
366 bt_link_info->hid_exist = stack_info->hid_exist;
367
368 /*for win-8 stack HID report error*/
369 if (!stack_info->hid_exist)
370 stack_info->hid_exist = coex_sta->hid_exist;
371 /*sync BTInfo with BT firmware and stack*/
372 /* when stack HID report error, here we use the info from bt fw.*/
373 if (!stack_info->bt_link_exist)
374 stack_info->bt_link_exist = coex_sta->bt_link_exist;
375#endif
376 /* check if Sco only */
377 if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
378 !bt_link_info->pan_exist && !bt_link_info->hid_exist)
379 bt_link_info->sco_only = true;
380 else
381 bt_link_info->sco_only = false;
382
383 /* check if A2dp only */
384 if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
385 !bt_link_info->pan_exist && !bt_link_info->hid_exist)
386 bt_link_info->a2dp_only = true;
387 else
388 bt_link_info->a2dp_only = false;
389
390 /* check if Pan only */
391 if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
392 bt_link_info->pan_exist && !bt_link_info->hid_exist)
393 bt_link_info->pan_only = true;
394 else
395 bt_link_info->pan_only = false;
396
397 /* check if Hid only */
398 if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
399 !bt_link_info->pan_exist && bt_link_info->hid_exist)
400 bt_link_info->hid_only = true;
401 else
402 bt_link_info->hid_only = false;
403}
404
/*
 * Pick the coexistence algorithm (BT_8723B_2ANT_COEX_ALGO_*) matching the
 * currently active set of BT profiles (SCO/HID/A2DP/PAN) and whether the
 * PAN link is a HS (hotspot) connection.  The decision tree is keyed on
 * the number of simultaneously active profiles; the >= 3 branch handles
 * the four-profile case that falls through the == 3 branch.
 * Returns BT_8723B_2ANT_COEX_ALGO_UNDEFINED when no BT link exists or no
 * combination matches.
 */
static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
{
	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
	bool bt_hs_on = false;
	u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
	u8 num_of_diff_profile = 0;

	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);

	if (!bt_link_info->bt_link_exist) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], No BT link exists!!!\n");
		return algorithm;
	}

	/* count how many distinct profiles are active */
	if (bt_link_info->sco_exist)
		num_of_diff_profile++;
	if (bt_link_info->hid_exist)
		num_of_diff_profile++;
	if (bt_link_info->pan_exist)
		num_of_diff_profile++;
	if (bt_link_info->a2dp_exist)
		num_of_diff_profile++;

	if (num_of_diff_profile == 1) {
		if (bt_link_info->sco_exist) {
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
				  "[BTCoex], SCO only\n");
			algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
		} else {
			if (bt_link_info->hid_exist) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], HID only\n");
				algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
			} else if (bt_link_info->a2dp_exist) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], A2DP only\n");
				algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
			} else if (bt_link_info->pan_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], PAN(HS) only\n");
					algorithm =
						BT_8723B_2ANT_COEX_ALGO_PANHS;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], PAN(EDR) only\n");
					algorithm =
						BT_8723B_2ANT_COEX_ALGO_PANEDR;
				}
			}
		}
	} else if (num_of_diff_profile == 2) {
		if (bt_link_info->sco_exist) {
			if (bt_link_info->hid_exist) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], SCO + HID\n");
				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
			} else if (bt_link_info->a2dp_exist) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], SCO + A2DP ==> SCO\n");
				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
			} else if (bt_link_info->pan_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + PAN(HS)\n");
					algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + PAN(EDR)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				}
			}
		} else {
			if (bt_link_info->hid_exist &&
			    bt_link_info->a2dp_exist) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], HID + A2DP\n");
				algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
			} else if (bt_link_info->hid_exist &&
				   bt_link_info->pan_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], HID + PAN(HS)\n");
					algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], HID + PAN(EDR)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				}
			} else if (bt_link_info->pan_exist &&
				   bt_link_info->a2dp_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], A2DP + PAN(HS)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex],A2DP + PAN(EDR)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
				}
			}
		}
	} else if (num_of_diff_profile == 3) {
		if (bt_link_info->sco_exist) {
			if (bt_link_info->hid_exist &&
			    bt_link_info->a2dp_exist) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], SCO + HID + A2DP"
					  " ==> HID\n");
				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
			} else if (bt_link_info->hid_exist &&
				   bt_link_info->pan_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + HID + "
						  "PAN(HS)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + HID + "
						  "PAN(EDR)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				}
			} else if (bt_link_info->pan_exist &&
				   bt_link_info->a2dp_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + A2DP + "
						  "PAN(HS)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + A2DP + "
						  "PAN(EDR) ==> HID\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				}
			}
		} else {
			if (bt_link_info->hid_exist &&
			    bt_link_info->pan_exist &&
			    bt_link_info->a2dp_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], HID + A2DP + "
						  "PAN(HS)\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], HID + A2DP + "
						  "PAN(EDR)\n");
					algorithm =
					BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
				}
			}
		}
	} else if (num_of_diff_profile >= 3) {
		/* all four profiles active (the == 3 branch ran otherwise) */
		if (bt_link_info->sco_exist) {
			if (bt_link_info->hid_exist &&
			    bt_link_info->pan_exist &&
			    bt_link_info->a2dp_exist) {
				if (bt_hs_on) {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], Error!!! SCO + HID"
						  " + A2DP + PAN(HS)\n");
				} else {
					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
						  "[BTCoex], SCO + HID + A2DP +"
						  " PAN(EDR)==>PAN(EDR)+HID\n");
					algorithm =
					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
				}
			}
		}
	}
	return algorithm;
}
591
/*
 * Decide whether BT should be asked to lower its tx power.  Returns true
 * when wifi is connected and either the HS-mode BT RSSI exceeds 37 or the
 * regular BT RSSI state (2-level, threshold 35) is HIGH/STAY_HIGH.
 * Returns false if any of the btc_get queries fails.
 */
static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
{
	bool ret = false;
	bool bt_hs_on = false, wifi_connected = false;
	s32 bt_hs_rssi = 0;
	u8 bt_rssi_state;

	if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
		return false;
	if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
				&wifi_connected))
		return false;
	if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
		return false;

	bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);

	if (wifi_connected) {
		if (bt_hs_on) {
			if (bt_hs_rssi > 37) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
					  "[BTCoex], Need to decrease bt "
					  "power for HS mode!!\n");
				ret = true;
			}
		} else {
			if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
			    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
					  "[BTCoex], Need to decrease bt "
					  "power for Wifi is connected!!\n");
				ret = true;
			}
		}
	}

	return ret;
}
630
631static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
632 u8 dac_swing_lvl)
633{
634 u8 h2c_parameter[1] = {0};
635
636 /* There are several type of dacswing
637 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
638 */
639 h2c_parameter[0] = dac_swing_lvl;
640
641 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
642 "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
643 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
644 "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
645
646 btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
647}
648
649static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
650 bool dec_bt_pwr)
651{
652 u8 h2c_parameter[1] = {0};
653
654 h2c_parameter[0] = 0;
655
656 if (dec_bt_pwr)
657 h2c_parameter[0] |= BIT1;
658
659 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
660 "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
661 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
662
663 btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
664}
665
/*
 * Request/cancel a BT power decrease, using the usual pre/cur caching
 * pattern: unless @force_exec is set, the H2C is only sent when the
 * requested value differs from the last one actually applied.
 */
static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
				    bool force_exec, bool dec_bt_pwr)
{
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
		  "[BTCoex], %s Dec BT power = %s\n",
		  (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;

	if (!force_exec) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);

		/* no change since last call: skip the firmware command */
		if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
			return;
	}
	btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);

	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
}
686
/*
 * Set the firmware DAC swing level, skipping the H2C when the value is
 * unchanged unless @force_exec is set (same pre/cur caching pattern as
 * btc8723b2ant_dec_bt_pwr()).
 */
static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
					  bool force_exec, u8 fw_dac_swing_lvl)
{
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
		  "[BTCoex], %s set FW Dac Swing level = %d\n",
		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;

	if (!force_exec) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], preFwDacSwingLvl=%d, "
			  "curFwDacSwingLvl=%d\n",
			  coex_dm->pre_fw_dac_swing_lvl,
			  coex_dm->cur_fw_dac_swing_lvl);

		if (coex_dm->pre_fw_dac_swing_lvl ==
		    coex_dm->cur_fw_dac_swing_lvl)
			return;
	}

	btc8723b2ant_set_fw_dac_swing_level(btcoexist,
					    coex_dm->cur_fw_dac_swing_lvl);
	coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
}
711
/*
 * Shrink or restore the RF path-A Rx LPF corner (RF reg 0x1e).  The
 * restore path uses the value backed up in coex_dm->bt_rf0x1e_backup,
 * which is only valid once btcoexist->initilized is set (field name's
 * spelling is as declared in the project headers).
 */
static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
						 bool rx_rf_shrink_on)
{
	if (rx_rf_shrink_on) {
		/* Shrink RF Rx LPF corner */
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
					  0xfffff, 0xffffc);
	} else {
		/* Resume RF Rx LPF corner */
		/* After initialized, we can use coex_dm->btRf0x1eBackup */
		if (btcoexist->initilized) {
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
				  "[BTCoex], Resume RF Rx LPF corner!!\n");
			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
						  0xfffff,
						  coex_dm->bt_rf0x1e_backup);
		}
	}
}
733
/*
 * Enable/disable the Rx RF LPF shrink with the pre/cur caching pattern:
 * unless @force_exec is set, the RF write is skipped when the requested
 * state matches what was last applied.
 */
static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
				   bool force_exec, bool rx_rf_shrink_on)
{
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
		  (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
						   "ON" : "OFF"));
	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;

	if (!force_exec) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
			  "[BTCoex], bPreRfRxLpfShrink=%d, "
			  "bCurRfRxLpfShrink=%d\n",
			  coex_dm->pre_rf_rx_lpf_shrink,
			  coex_dm->cur_rf_rx_lpf_shrink);

		if (coex_dm->pre_rf_rx_lpf_shrink ==
		    coex_dm->cur_rf_rx_lpf_shrink)
			return;
	}
	btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
					     coex_dm->cur_rf_rx_lpf_shrink);

	coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
}
759
760static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
761 bool low_penalty_ra)
762{
763 u8 h2c_parameter[6] = {0};
764
765 h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
766
767 if (low_penalty_ra) {
768 h2c_parameter[1] |= BIT0;
769 /*normal rate except MCS7/6/5, OFDM54/48/36*/
770 h2c_parameter[2] = 0x00;
771 h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
772 h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
773 h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
774 }
775
776 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
777 "[BTCoex], set WiFi Low-Penalty Retry: %s",
778 (low_penalty_ra ? "ON!!" : "OFF!!"));
779
780 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
781}
782
/*
 * Enable/disable wifi low-penalty retry rate adaptation, with the usual
 * pre/cur caching: the H2C is skipped when nothing changed unless
 * @force_exec is set.
 */
static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
					bool force_exec, bool low_penalty_ra)
{
	/*return; */
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
		  "[BTCoex], %s turn LowPenaltyRA = %s\n",
		  (force_exec ? "force to" : ""), (low_penalty_ra ?
						   "ON" : "OFF"));
	coex_dm->cur_low_penalty_ra = low_penalty_ra;

	if (!force_exec) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
			  "[BTCoex], bPreLowPenaltyRa=%d, "
			  "bCurLowPenaltyRa=%d\n",
			  coex_dm->pre_low_penalty_ra,
			  coex_dm->cur_low_penalty_ra);

		if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
			return;
	}
	btc8723b_set_penalty_txrate(btcoexist, coex_dm->cur_low_penalty_ra);

	coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
}
807
808static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
809 u32 level)
810{
811 u8 val = (u8) level;
812 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
813 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
814 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
815}
816
817static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex,
818 bool sw_dac_swing_on,
819 u32 sw_dac_swing_lvl)
820{
821 if (sw_dac_swing_on)
822 btc8723b2ant_set_dac_swing_reg(btcoex, sw_dac_swing_lvl);
823 else
824 btc8723b2ant_set_dac_swing_reg(btcoex, 0x18);
825}
826
827
/*
 * Set the software DAC swing (on/off plus level) using the pre/cur
 * caching pattern: unless @force_exec is set, the register write is
 * skipped when both the on/off state and the level are unchanged.
 */
static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
				   bool force_exec, bool dac_swing_on,
				   u32 dac_swing_lvl)
{
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
		  "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
		  (force_exec ? "force to" : ""),
		  (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
	coex_dm->cur_dac_swing_on = dac_swing_on;
	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;

	if (!force_exec) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
			  " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
			  coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
			  coex_dm->cur_dac_swing_on,
			  coex_dm->cur_dac_swing_lvl);

		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
			return;
	}
	/* NOTE(review): unconditional 30 ms busy-wait before the write —
	 * reason not evident from this file; presumably lets a previous
	 * BB/RF operation settle. Confirm against the vendor sources. */
	mdelay(30);
	btc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
					       dac_swing_lvl);

	coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
	coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
}
858
/*
 * Switch the BB AGC gain table and the path-A RF gain tables between the
 * coexistence ("on") and default ("off") settings, then report an RSSI
 * adjustment value (8 when enabled, 0 otherwise) back through btc_set.
 * The write sequence (0xc78 table entries, then 0xef/0x3b pairs, then
 * 0xed/0x40 pairs, each bracketed by enable/disable writes) must be kept
 * in this exact order.
 */
static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
				       bool agc_table_en)
{
	u8 rssi_adjust_val = 0;

	/* BB AGC Gain Table */
	if (agc_table_en) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], BB Agc Table On!\n");
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
	} else {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], BB Agc Table Off!\n");
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
	}


	/* RF Gain */
	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
	if (agc_table_en) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], Agc Table On!\n");
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
					  0xfffff, 0x38fff);
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
					  0xfffff, 0x38ffe);
	} else {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], Agc Table Off!\n");
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
					  0xfffff, 0x380c3);
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
					  0xfffff, 0x28ce6);
	}
	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);

	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);

	if (agc_table_en) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], Agc Table On!\n");
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
					  0xfffff, 0x38fff);
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
					  0xfffff, 0x38ffe);
	} else {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
			  "[BTCoex], Agc Table Off!\n");
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
					  0xfffff, 0x380c3);
		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
					  0xfffff, 0x28ce6);
	}
	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);

	/* set rssiAdjustVal for wifi module. */
	if (agc_table_en)
		rssi_adjust_val = 8;
	btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
			   &rssi_adjust_val);
}
932
933static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
934 bool force_exec, bool agc_table_en)
935{
936 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
937 "[BTCoex], %s %s Agc Table\n",
938 (force_exec ? "force to" : ""),
939 (agc_table_en ? "Enable" : "Disable"));
940 coex_dm->cur_agc_table_en = agc_table_en;
941
942 if (!force_exec) {
943 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
944 "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
945 coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
946
947 if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
948 return;
949 }
950 btc8723b2ant_set_agc_table(btcoexist, agc_table_en);
951
952 coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
953}
954
/*
 * Write one complete coexistence priority table to the hardware:
 * three 4-byte registers (0x6c0, 0x6c4, 0x6c8) and one 1-byte
 * register (0x6cc).  Each write is traced individually.
 */
static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
					u32 val0x6c0, u32 val0x6c4,
					u32 val0x6c8, u8 val0x6cc)
{
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
		  "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
		  "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
		  "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
		  "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
	/* 0x6cc is a single-byte register, unlike the other three */
	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
975
976static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
977 bool force_exec, u32 val0x6c0,
978 u32 val0x6c4, u32 val0x6c8,
979 u8 val0x6cc)
980{
981 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
982 "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
983 " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
984 (force_exec ? "force to" : ""), val0x6c0,
985 val0x6c4, val0x6c8, val0x6cc);
986 coex_dm->cur_val0x6c0 = val0x6c0;
987 coex_dm->cur_val0x6c4 = val0x6c4;
988 coex_dm->cur_val0x6c8 = val0x6c8;
989 coex_dm->cur_val0x6cc = val0x6cc;
990
991 if (!force_exec) {
992 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
993 "[BTCoex], preVal0x6c0=0x%x, "
994 "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
995 "preVal0x6cc=0x%x !!\n",
996 coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
997 coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
998 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
999 "[BTCoex], curVal0x6c0=0x%x, "
1000 "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
1001 "curVal0x6cc=0x%x !!\n",
1002 coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
1003 coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
1004
1005 if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
1006 (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
1007 (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
1008 (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
1009 return;
1010 }
1011 btc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
1012 val0x6c8, val0x6cc);
1013
1014 coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
1015 coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
1016 coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
1017 coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
1018}
1019
1020static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
1021 bool force_exec, u8 type)
1022{
1023 switch (type) {
1024 case 0:
1025 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
1026 0x55555555, 0xffff, 0x3);
1027 break;
1028 case 1:
1029 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
1030 0x5afa5afa, 0xffff, 0x3);
1031 break;
1032 case 2:
1033 btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
1034 0x5a5a5a5a, 0xffff, 0x3);
1035 break;
1036 case 3:
1037 btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
1038 0xaaaaaaaa, 0xffff, 0x3);
1039 break;
1040 case 4:
1041 btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
1042 0xffffffff, 0xffff, 0x3);
1043 break;
1044 case 5:
1045 btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
1046 0x5fff5fff, 0xffff, 0x3);
1047 break;
1048 case 6:
1049 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
1050 0x5a5a5a5a, 0xffff, 0x3);
1051 break;
1052 case 7:
1053 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
1054 0x5afa5afa, 0xffff, 0x3);
1055 break;
1056 case 8:
1057 btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
1058 0x5aea5aea, 0xffff, 0x3);
1059 break;
1060 case 9:
1061 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
1062 0x5aea5aea, 0xffff, 0x3);
1063 break;
1064 case 10:
1065 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
1066 0x5aff5aff, 0xffff, 0x3);
1067 break;
1068 case 11:
1069 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
1070 0x5a5f5a5f, 0xffff, 0x3);
1071 break;
1072 case 12:
1073 btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
1074 0x5f5f5f5f, 0xffff, 0x3);
1075 break;
1076 default:
1077 break;
1078 }
1079}
1080
1081static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
1082 bool enable)
1083{
1084 u8 h2c_parameter[1] = {0};
1085
1086 if (enable)
1087 h2c_parameter[0] |= BIT0;/* function enable*/
1088
1089 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
1090 "[BTCoex], set FW for BT Ignore Wlan_Act, "
1091 "FW write 0x63=0x%x\n", h2c_parameter[0]);
1092
1093 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
1094}
1095
1096static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
1097 bool force_exec, bool enable)
1098{
1099 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
1100 "[BTCoex], %s turn Ignore WlanAct %s\n",
1101 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
1102 coex_dm->cur_ignore_wlan_act = enable;
1103
1104 if (!force_exec) {
1105 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
1106 "[BTCoex], bPreIgnoreWlanAct = %d, "
1107 "bCurIgnoreWlanAct = %d!!\n",
1108 coex_dm->pre_ignore_wlan_act,
1109 coex_dm->cur_ignore_wlan_act);
1110
1111 if (coex_dm->pre_ignore_wlan_act ==
1112 coex_dm->cur_ignore_wlan_act)
1113 return;
1114 }
1115 btc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
1116
1117 coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
1118}
1119
1120static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
1121 u8 byte2, u8 byte3, u8 byte4, u8 byte5)
1122{
1123 u8 h2c_parameter[5];
1124
1125 h2c_parameter[0] = byte1;
1126 h2c_parameter[1] = byte2;
1127 h2c_parameter[2] = byte3;
1128 h2c_parameter[3] = byte4;
1129 h2c_parameter[4] = byte5;
1130
1131 coex_dm->ps_tdma_para[0] = byte1;
1132 coex_dm->ps_tdma_para[1] = byte2;
1133 coex_dm->ps_tdma_para[2] = byte3;
1134 coex_dm->ps_tdma_para[3] = byte4;
1135 coex_dm->ps_tdma_para[4] = byte5;
1136
1137 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
1138 "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
1139 h2c_parameter[0],
1140 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
1141 h2c_parameter[3] << 8 | h2c_parameter[4]);
1142
1143 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
1144}
1145
/*
 * Apply the first group of software coex mechanisms: RX LPF shrink and
 * low-penalty rate adaptation.
 *
 * NOTE(review): limited_dig and bt_lna_constrain are accepted but not
 * acted upon here — presumably kept for signature parity with other
 * chip variants; confirm this is intentional.
 */
static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
				       bool shrink_rx_lpf, bool low_penalty_ra,
				       bool limited_dig, bool bt_lna_constrain)
{
	btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
	btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
1153
/*
 * Apply the second group of software coex mechanisms: AGC table shift
 * and software DAC swing (with its level).
 *
 * NOTE(review): adc_backoff is accepted but not acted upon here —
 * presumably kept for signature parity with other chip variants;
 * confirm this is intentional.
 */
static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
				       bool agc_table_shift, bool adc_backoff,
				       bool sw_dac_swing, u32 dac_swing_lvl)
{
	btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
	btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
			       dac_swing_lvl);
}
1162
/*
 * Select and program a PS TDMA schedule in firmware.
 *
 * @force_exec: program firmware even if (turn_on, type) is unchanged.
 * @turn_on:    true selects one of the "on" parameter sets below; false
 *              selects a "disable" parameter set.
 * @type:       schedule index; meaning of the 5 parameter bytes is
 *              firmware-defined (see btc8723b2ant_set_fw_ps_tdma).
 *
 * The pre/cur pair in coex_dm tracks the last programmed state so
 * redundant H2C writes are skipped.
 */
static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
				 bool turn_on, u8 type)
{
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
		  "[BTCoex], %s turn %s PS TDMA, type=%d\n",
		  (force_exec ? "force to" : ""),
		  (turn_on ? "ON" : "OFF"), type);
	coex_dm->cur_ps_tdma_on = turn_on;
	coex_dm->cur_ps_tdma = type;

	if (!force_exec) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
			  coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
			  coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);

		/* nothing changed -> skip the firmware write */
		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
			return;
	}
	if (turn_on) {
		/* Firmware-defined 5-byte parameter sets per type.
		 * Note: several types intentionally share values
		 * (e.g. 1/9/71, 2/10, 5/13, 6/14).
		 */
		switch (type) {
		case 1:
		default:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
						    0x1a, 0xe1, 0x90);
			break;
		case 2:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
						    0x12, 0xe1, 0x90);
			break;
		case 3:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
						    0x3, 0xf1, 0x90);
			break;
		case 4:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
						    0x03, 0xf1, 0x90);
			break;
		case 5:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
						    0x1a, 0x60, 0x90);
			break;
		case 6:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
						    0x12, 0x60, 0x90);
			break;
		case 7:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
						    0x3, 0x70, 0x90);
			break;
		case 8:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
						    0x3, 0x70, 0x90);
			break;
		case 9:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
						    0x1a, 0xe1, 0x90);
			break;
		case 10:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
						    0x12, 0xe1, 0x90);
			break;
		case 11:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
						    0xa, 0xe1, 0x90);
			break;
		case 12:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
						    0x5, 0xe1, 0x90);
			break;
		case 13:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
						    0x1a, 0x60, 0x90);
			break;
		case 14:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
						    0x12, 0x60, 0x90);
			break;
		case 15:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
						    0xa, 0x60, 0x90);
			break;
		case 16:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
						    0x5, 0x60, 0x90);
			break;
		case 17:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
						    0x2f, 0x60, 0x90);
			break;
		case 18:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
						    0x5, 0xe1, 0x90);
			break;
		case 19:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
						    0x25, 0xe1, 0x90);
			break;
		case 20:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
						    0x25, 0x60, 0x90);
			break;
		case 21:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
						    0x03, 0x70, 0x90);
			break;
		case 71:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
						    0x1a, 0xe1, 0x90);
			break;
		}
	} else {
		/* disable PS tdma */
		switch (type) {
		case 0:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
						    0x40, 0x0);
			break;
		case 1:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
						    0x48, 0x0);
			break;
		default:
			btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
						    0x40, 0x0);
			break;
		}
	}

	/* update pre state */
	coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
	coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
1299
/*
 * Disable every coexistence mechanism: firmware (PS TDMA, DAC swing
 * level, BT power decrease), software (both sw mechanism groups), and
 * hardware (RF register 0x1 cleared, coex table type 0).
 */
static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
{
	/* fw all off */
	btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
	btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
	btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);

	/* sw all off */
	btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
	btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);

	/* hw all off */
	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
	btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
}
1315
/*
 * Reset the coex mechanism to its baseline state.  Uses FORCE_EXEC so
 * the firmware settings are re-programmed regardless of the cached
 * pre/cur state in coex_dm.
 */
static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
	/* force to reset coex mechanism*/

	btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
	btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
	btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);

	btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
	btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
}
1327
/*
 * Coex policy while BT is in inquiry/page scan: disable wifi low power,
 * pick a coex table / TDMA schedule based on wifi connection state, turn
 * all software mechanisms off, and override register 0x948 (the original
 * value is saved so it can be restored when inquiry ends —
 * need_recover_0x948 flags that a restore is pending).
 */
static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
{
	bool wifi_connected = false;
	bool low_pwr_disable = true;

	btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
			   &low_pwr_disable);
	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
			   &wifi_connected);

	if (wifi_connected) {
		btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
		btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
	} else {
		btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
		btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
	}
	btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
	btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);

	btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
	btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);

	/* remember the pre-inquiry 0x948 value for later restore */
	coex_dm->need_recover_0x948 = true;
	coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);

	btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
}
1356
/*
 * Decide whether one of the "common" (simple/idle) coex policies covers
 * the current wifi/BT state, applying it if so.
 *
 * Returns true when a common policy was applied, false when the caller
 * must run an algorithm-specific action instead.  Note the early
 * "return false" paths when BT HS (high speed link) is active: in that
 * case low_pwr_disable has already been set but no policy is applied.
 */
static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
{
	bool common = false, wifi_connected = false;
	bool wifi_busy = false;
	bool bt_hs_on = false, low_pwr_disable = false;

	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
			   &wifi_connected);
	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);

	if (!wifi_connected) {
		/* wifi idle: allow wifi low power, turn everything off */
		low_pwr_disable = false;
		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
				   &low_pwr_disable);

		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], Wifi non-connected idle!!\n");

		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
					  0x0);
		btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
		btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
		btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
		btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);

		btc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
					   false);
		btc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
					   0x18);

		common = true;
	} else {
		if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
		    coex_dm->bt_status) {
			/* wifi connected, BT has no link: same "all off"
			 * policy but with DAC swing level 0xb
			 */
			low_pwr_disable = false;
			btcoexist->btc_set(btcoexist,
					   BTC_SET_ACT_DISABLE_LOW_POWER,
					   &low_pwr_disable);

			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
				  "[BTCoex], Wifi connected + "
				  "BT non connected-idle!!\n");

			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
						  0xfffff, 0x0);
			btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
			btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
						      0xb);
			btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
						false);

			btc8723b2ant_sw_mechanism1(btcoexist, false, false,
						   false, false);
			btc8723b2ant_sw_mechanism2(btcoexist, false, false,
						   false, 0x18);

			common = true;
		} else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
			   coex_dm->bt_status) {
			/* wifi connected, BT link idle */
			low_pwr_disable = true;
			btcoexist->btc_set(btcoexist,
					   BTC_SET_ACT_DISABLE_LOW_POWER,
					   &low_pwr_disable);

			/* BT HS active: not a common case */
			if (bt_hs_on)
				return false;
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
				  "[BTCoex], Wifi connected + "
				  "BT connected-idle!!\n");

			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
						  0xfffff, 0x0);
			btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
			btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
						      0xb);
			btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
						false);

			/* shrink_rx_lpf enabled in this state only */
			btc8723b2ant_sw_mechanism1(btcoexist, true, false,
						   false, false);
			btc8723b2ant_sw_mechanism2(btcoexist, false, false,
						   false, 0x18);

			common = true;
		} else {
			/* BT busy */
			low_pwr_disable = true;
			btcoexist->btc_set(btcoexist,
					   BTC_SET_ACT_DISABLE_LOW_POWER,
					   &low_pwr_disable);

			if (wifi_busy) {
				/* both busy: algorithm-specific handling */
				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], Wifi Connected-Busy + "
					  "BT Busy!!\n");
				common = false;
			} else {
				if (bt_hs_on)
					return false;

				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
					  "[BTCoex], Wifi Connected-Idle + "
					  "BT Busy!!\n");

				btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
							  0x1, 0xfffff, 0x0);
				btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC,
						       7);
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 21);
				btc8723b2ant_fw_dac_swing_lvl(btcoexist,
							      NORMAL_EXEC,
							      0xb);
				if (btc8723b_need_dec_pwr(btcoexist))
					btc8723b2ant_dec_bt_pwr(btcoexist,
								NORMAL_EXEC,
								true);
				else
					btc8723b2ant_dec_bt_pwr(btcoexist,
								NORMAL_EXEC,
								false);
				btc8723b2ant_sw_mechanism1(btcoexist, false,
							   false, false,
							   false);
				btc8723b2ant_sw_mechanism2(btcoexist, false,
							   false, false,
							   0x18);
				common = true;
			}
		}
	}

	return common;
}
1493
/*
 * Adjust the PS TDMA schedule for max interval == 1.
 *
 * Two steps per branch: first map the current tdma type into the
 * interval-1 family for the given tx_pause state (types 5-8/13-16 with
 * pause, 1-4/9-12/71 without), then step within that family according
 * to @result (-1 = give BT more time, +1 = give wifi more time, 0 =
 * keep).  coex_dm->tdma_adj_type is kept in sync with each change.
 */
static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
			  s32 result)
{
	/* Set PS TDMA for max interval == 1 */
	if (tx_pause) {
		BTC_PRINT(BTC_MSG_ALGORITHM,
			  ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], TxPause = 1\n");

		/* migrate from the no-pause family to the pause family */
		if (coex_dm->cur_ps_tdma == 71) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 5);
			coex_dm->tdma_adj_type = 5;
		} else if (coex_dm->cur_ps_tdma == 1) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 5);
			coex_dm->tdma_adj_type = 5;
		} else if (coex_dm->cur_ps_tdma == 2) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 6);
			coex_dm->tdma_adj_type = 6;
		} else if (coex_dm->cur_ps_tdma == 3) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 7);
			coex_dm->tdma_adj_type = 7;
		} else if (coex_dm->cur_ps_tdma == 4) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 8);
			coex_dm->tdma_adj_type = 8;
		} else if (coex_dm->cur_ps_tdma == 9) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 13);
			coex_dm->tdma_adj_type = 13;
		} else if (coex_dm->cur_ps_tdma == 10) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 14);
			coex_dm->tdma_adj_type = 14;
		} else if (coex_dm->cur_ps_tdma == 11) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 15);
			coex_dm->tdma_adj_type = 15;
		} else if (coex_dm->cur_ps_tdma == 12) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
					     true, 16);
			coex_dm->tdma_adj_type = 16;
		}

		if (result == -1) {
			/* shift toward more BT time */
			if (coex_dm->cur_ps_tdma == 5) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 6);
				coex_dm->tdma_adj_type = 6;
			} else if (coex_dm->cur_ps_tdma == 6) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 7);
				coex_dm->tdma_adj_type = 7;
			} else if (coex_dm->cur_ps_tdma == 7) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 8);
				coex_dm->tdma_adj_type = 8;
			} else if (coex_dm->cur_ps_tdma == 13) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 14);
				coex_dm->tdma_adj_type = 14;
			} else if (coex_dm->cur_ps_tdma == 14) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 15);
				coex_dm->tdma_adj_type = 15;
			} else if (coex_dm->cur_ps_tdma == 15) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 16);
				coex_dm->tdma_adj_type = 16;
			}
		} else if (result == 1) {
			/* shift toward more wifi time */
			if (coex_dm->cur_ps_tdma == 8) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 7);
				coex_dm->tdma_adj_type = 7;
			} else if (coex_dm->cur_ps_tdma == 7) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 6);
				coex_dm->tdma_adj_type = 6;
			} else if (coex_dm->cur_ps_tdma == 6) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 5);
				coex_dm->tdma_adj_type = 5;
			} else if (coex_dm->cur_ps_tdma == 16) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 15);
				coex_dm->tdma_adj_type = 15;
			} else if (coex_dm->cur_ps_tdma == 15) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 14);
				coex_dm->tdma_adj_type = 14;
			} else if (coex_dm->cur_ps_tdma == 14) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 13);
				coex_dm->tdma_adj_type = 13;
			}
		}
	} else {
		BTC_PRINT(BTC_MSG_ALGORITHM,
			  ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], TxPause = 0\n");
		/* migrate from the pause family back to the no-pause family */
		if (coex_dm->cur_ps_tdma == 5) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
			coex_dm->tdma_adj_type = 71;
		} else if (coex_dm->cur_ps_tdma == 6) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
			coex_dm->tdma_adj_type = 2;
		} else if (coex_dm->cur_ps_tdma == 7) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
			coex_dm->tdma_adj_type = 3;
		} else if (coex_dm->cur_ps_tdma == 8) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
			coex_dm->tdma_adj_type = 4;
		} else if (coex_dm->cur_ps_tdma == 13) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
			coex_dm->tdma_adj_type = 9;
		} else if (coex_dm->cur_ps_tdma == 14) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
			coex_dm->tdma_adj_type = 10;
		} else if (coex_dm->cur_ps_tdma == 15) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
			coex_dm->tdma_adj_type = 11;
		} else if (coex_dm->cur_ps_tdma == 16) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
			coex_dm->tdma_adj_type = 12;
		}

		if (result == -1) {
			if (coex_dm->cur_ps_tdma == 71) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 1);
				coex_dm->tdma_adj_type = 1;
			} else if (coex_dm->cur_ps_tdma == 1) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 2);
				coex_dm->tdma_adj_type = 2;
			} else if (coex_dm->cur_ps_tdma == 2) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 3);
				coex_dm->tdma_adj_type = 3;
			} else if (coex_dm->cur_ps_tdma == 3) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 4);
				coex_dm->tdma_adj_type = 4;
			} else if (coex_dm->cur_ps_tdma == 9) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 10);
				coex_dm->tdma_adj_type = 10;
			} else if (coex_dm->cur_ps_tdma == 10) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 11);
				coex_dm->tdma_adj_type = 11;
			} else if (coex_dm->cur_ps_tdma == 11) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 12);
				coex_dm->tdma_adj_type = 12;
			}
		} else if (result == 1) {
			/* step down by one type, with 1 wrapping to 71 */
			int tmp = coex_dm->cur_ps_tdma;
			switch (tmp) {
			case 4:
			case 3:
			case 2:
			case 12:
			case 11:
			case 10:
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, tmp - 1);
				coex_dm->tdma_adj_type = tmp - 1;
				break;
			case 1:
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 71);
				coex_dm->tdma_adj_type = 71;
				break;
			}
		}
	}
}
1676
/*
 * Adjust the PS TDMA schedule for max interval == 2.
 *
 * Same two-step structure as set_tdma_int1, but the interval-2 family
 * never uses the extreme types: transitions saturate at 6/14 (pause)
 * and 2/10 (no pause) instead of stepping past them.
 */
static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
			  s32 result)
{
	/* Set PS TDMA for max interval == 2 */
	if (tx_pause) {
		BTC_PRINT(BTC_MSG_ALGORITHM,
			  ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], TxPause = 1\n");
		/* migrate into the pause family, clamped to >= type 6 */
		if (coex_dm->cur_ps_tdma == 1) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
			coex_dm->tdma_adj_type = 6;
		} else if (coex_dm->cur_ps_tdma == 2) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
			coex_dm->tdma_adj_type = 6;
		} else if (coex_dm->cur_ps_tdma == 3) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
			coex_dm->tdma_adj_type = 7;
		} else if (coex_dm->cur_ps_tdma == 4) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
			coex_dm->tdma_adj_type = 8;
		} else if (coex_dm->cur_ps_tdma == 9) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
			coex_dm->tdma_adj_type = 14;
		} else if (coex_dm->cur_ps_tdma == 10) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
			coex_dm->tdma_adj_type = 14;
		} else if (coex_dm->cur_ps_tdma == 11) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
			coex_dm->tdma_adj_type = 15;
		} else if (coex_dm->cur_ps_tdma == 12) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
			coex_dm->tdma_adj_type = 16;
		}
		if (result == -1) {
			/* shift toward more BT time */
			if (coex_dm->cur_ps_tdma == 5) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 6);
				coex_dm->tdma_adj_type = 6;
			} else if (coex_dm->cur_ps_tdma == 6) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 7);
				coex_dm->tdma_adj_type = 7;
			} else if (coex_dm->cur_ps_tdma == 7) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 8);
				coex_dm->tdma_adj_type = 8;
			} else if (coex_dm->cur_ps_tdma == 13) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 14);
				coex_dm->tdma_adj_type = 14;
			} else if (coex_dm->cur_ps_tdma == 14) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 15);
				coex_dm->tdma_adj_type = 15;
			} else if (coex_dm->cur_ps_tdma == 15) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 16);
				coex_dm->tdma_adj_type = 16;
			}
		} else if (result == 1) {
			/* shift toward more wifi time, saturating at 6/14 */
			if (coex_dm->cur_ps_tdma == 8) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 7);
				coex_dm->tdma_adj_type = 7;
			} else if (coex_dm->cur_ps_tdma == 7) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 6);
				coex_dm->tdma_adj_type = 6;
			} else if (coex_dm->cur_ps_tdma == 6) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 6);
				coex_dm->tdma_adj_type = 6;
			} else if (coex_dm->cur_ps_tdma == 16) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 15);
				coex_dm->tdma_adj_type = 15;
			} else if (coex_dm->cur_ps_tdma == 15) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 14);
				coex_dm->tdma_adj_type = 14;
			} else if (coex_dm->cur_ps_tdma == 14) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 14);
				coex_dm->tdma_adj_type = 14;
			}
		}
	} else {
		BTC_PRINT(BTC_MSG_ALGORITHM,
			  ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], TxPause = 0\n");
		/* migrate into the no-pause family, clamped to >= type 2 */
		if (coex_dm->cur_ps_tdma == 5) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
			coex_dm->tdma_adj_type = 2;
		} else if (coex_dm->cur_ps_tdma == 6) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
			coex_dm->tdma_adj_type = 2;
		} else if (coex_dm->cur_ps_tdma == 7) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
			coex_dm->tdma_adj_type = 3;
		} else if (coex_dm->cur_ps_tdma == 8) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
			coex_dm->tdma_adj_type = 4;
		} else if (coex_dm->cur_ps_tdma == 13) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
			coex_dm->tdma_adj_type = 10;
		} else if (coex_dm->cur_ps_tdma == 14) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
			coex_dm->tdma_adj_type = 10;
		} else if (coex_dm->cur_ps_tdma == 15) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
			coex_dm->tdma_adj_type = 11;
		} else if (coex_dm->cur_ps_tdma == 16) {
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
			coex_dm->tdma_adj_type = 12;
		}
		if (result == -1) {
			if (coex_dm->cur_ps_tdma == 1) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 2);
				coex_dm->tdma_adj_type = 2;
			} else if (coex_dm->cur_ps_tdma == 2) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 3);
				coex_dm->tdma_adj_type = 3;
			} else if (coex_dm->cur_ps_tdma == 3) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 4);
				coex_dm->tdma_adj_type = 4;
			} else if (coex_dm->cur_ps_tdma == 9) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 10);
				coex_dm->tdma_adj_type = 10;
			} else if (coex_dm->cur_ps_tdma == 10) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 11);
				coex_dm->tdma_adj_type = 11;
			} else if (coex_dm->cur_ps_tdma == 11) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 12);
				coex_dm->tdma_adj_type = 12;
			}
		} else if (result == 1) {
			/* step down by one type, saturating at 2/10 */
			if (coex_dm->cur_ps_tdma == 4) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 3);
				coex_dm->tdma_adj_type = 3;
			} else if (coex_dm->cur_ps_tdma == 3) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 2);
				coex_dm->tdma_adj_type = 2;
			} else if (coex_dm->cur_ps_tdma == 2) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 2);
				coex_dm->tdma_adj_type = 2;
			} else if (coex_dm->cur_ps_tdma == 12) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 11);
				coex_dm->tdma_adj_type = 11;
			} else if (coex_dm->cur_ps_tdma == 11) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 10);
				coex_dm->tdma_adj_type = 10;
			} else if (coex_dm->cur_ps_tdma == 10) {
				btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
						     true, 10);
				coex_dm->tdma_adj_type = 10;
			}
		}
	}
}
1847
1848static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
1849 s32 result)
1850{
1851 /* Set PS TDMA for max interval == 3 */
1852 if (tx_pause) {
1853 BTC_PRINT(BTC_MSG_ALGORITHM,
1854 ALGO_TRACE_FW_DETAIL,
1855 "[BTCoex], TxPause = 1\n");
1856 if (coex_dm->cur_ps_tdma == 1) {
1857 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
1858 coex_dm->tdma_adj_type = 7;
1859 } else if (coex_dm->cur_ps_tdma == 2) {
1860 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
1861 coex_dm->tdma_adj_type = 7;
1862 } else if (coex_dm->cur_ps_tdma == 3) {
1863 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
1864 coex_dm->tdma_adj_type = 7;
1865 } else if (coex_dm->cur_ps_tdma == 4) {
1866 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
1867 coex_dm->tdma_adj_type = 8;
1868 } else if (coex_dm->cur_ps_tdma == 9) {
1869 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
1870 coex_dm->tdma_adj_type = 15;
1871 } else if (coex_dm->cur_ps_tdma == 10) {
1872 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
1873 coex_dm->tdma_adj_type = 15;
1874 } else if (coex_dm->cur_ps_tdma == 11) {
1875 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
1876 coex_dm->tdma_adj_type = 15;
1877 } else if (coex_dm->cur_ps_tdma == 12) {
1878 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
1879 coex_dm->tdma_adj_type = 16;
1880 }
1881 if (result == -1) {
1882 if (coex_dm->cur_ps_tdma == 5) {
1883 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1884 true, 7);
1885 coex_dm->tdma_adj_type = 7;
1886 } else if (coex_dm->cur_ps_tdma == 6) {
1887 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1888 true, 7);
1889 coex_dm->tdma_adj_type = 7;
1890 } else if (coex_dm->cur_ps_tdma == 7) {
1891 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1892 true, 8);
1893 coex_dm->tdma_adj_type = 8;
1894 } else if (coex_dm->cur_ps_tdma == 13) {
1895 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1896 true, 15);
1897 coex_dm->tdma_adj_type = 15;
1898 } else if (coex_dm->cur_ps_tdma == 14) {
1899 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1900 true, 15);
1901 coex_dm->tdma_adj_type = 15;
1902 } else if (coex_dm->cur_ps_tdma == 15) {
1903 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1904 true, 16);
1905 coex_dm->tdma_adj_type = 16;
1906 }
1907 } else if (result == 1) {
1908 if (coex_dm->cur_ps_tdma == 8) {
1909 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1910 true, 7);
1911 coex_dm->tdma_adj_type = 7;
1912 } else if (coex_dm->cur_ps_tdma == 7) {
1913 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1914 true, 7);
1915 coex_dm->tdma_adj_type = 7;
1916 } else if (coex_dm->cur_ps_tdma == 6) {
1917 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1918 true, 7);
1919 coex_dm->tdma_adj_type = 7;
1920 } else if (coex_dm->cur_ps_tdma == 16) {
1921 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1922 true, 15);
1923 coex_dm->tdma_adj_type = 15;
1924 } else if (coex_dm->cur_ps_tdma == 15) {
1925 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1926 true, 15);
1927 coex_dm->tdma_adj_type = 15;
1928 } else if (coex_dm->cur_ps_tdma == 14) {
1929 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1930 true, 15);
1931 coex_dm->tdma_adj_type = 15;
1932 }
1933 }
1934 } else {
1935 BTC_PRINT(BTC_MSG_ALGORITHM,
1936 ALGO_TRACE_FW_DETAIL,
1937 "[BTCoex], TxPause = 0\n");
1938 switch (coex_dm->cur_ps_tdma) {
1939 case 5:
1940 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
1941 coex_dm->tdma_adj_type = 3;
1942 break;
1943 case 6:
1944 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
1945 coex_dm->tdma_adj_type = 3;
1946 break;
1947 case 7:
1948 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
1949 coex_dm->tdma_adj_type = 3;
1950 break;
1951 case 8:
1952 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
1953 coex_dm->tdma_adj_type = 4;
1954 break;
1955 case 13:
1956 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
1957 coex_dm->tdma_adj_type = 11;
1958 break;
1959 case 14:
1960 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
1961 coex_dm->tdma_adj_type = 11;
1962 break;
1963 case 15:
1964 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
1965 coex_dm->tdma_adj_type = 11;
1966 break;
1967 case 16:
1968 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
1969 coex_dm->tdma_adj_type = 12;
1970 break;
1971 }
1972 if (result == -1) {
1973 switch (coex_dm->cur_ps_tdma) {
1974 case 1:
1975 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1976 true, 3);
1977 coex_dm->tdma_adj_type = 3;
1978 break;
1979 case 2:
1980 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1981 true, 3);
1982 coex_dm->tdma_adj_type = 3;
1983 break;
1984 case 3:
1985 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1986 true, 4);
1987 coex_dm->tdma_adj_type = 4;
1988 break;
1989 case 9:
1990 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1991 true, 11);
1992 coex_dm->tdma_adj_type = 11;
1993 break;
1994 case 10:
1995 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1996 true, 11);
1997 coex_dm->tdma_adj_type = 11;
1998 break;
1999 case 11:
2000 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2001 true, 12);
2002 coex_dm->tdma_adj_type = 12;
2003 break;
2004 }
2005 } else if (result == 1) {
2006 switch (coex_dm->cur_ps_tdma) {
2007 case 4:
2008 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2009 true, 3);
2010 coex_dm->tdma_adj_type = 3;
2011 break;
2012 case 3:
2013 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2014 true, 3);
2015 coex_dm->tdma_adj_type = 3;
2016 break;
2017 case 2:
2018 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2019 true, 3);
2020 coex_dm->tdma_adj_type = 3;
2021 break;
2022 case 12:
2023 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2024 true, 11);
2025 coex_dm->tdma_adj_type = 11;
2026 break;
2027 case 11:
2028 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2029 true, 11);
2030 coex_dm->tdma_adj_type = 11;
2031 break;
2032 case 10:
2033 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2034 true, 11);
2035 coex_dm->tdma_adj_type = 11;
2036 }
2037 }
2038 }
2039}
2040
/* Adaptively tune the PS TDMA duty cycle between WiFi and BT.
 *
 * On the first invocation after auto-adjust was disabled, program an
 * initial TDMA case chosen by traffic type (sco_hid), Tx pause state and
 * the allowed maximum interval, and reset the adaptation state.  On
 * subsequent calls, examine the BT retry counter reported by BT firmware
 * and decide whether the WiFi duration should grow (result = 1), shrink
 * (result = -1) or stay (result = 0), then delegate the actual case
 * selection to set_tdma_int1/2/3().
 *
 * NOTE(review): up/dn/m/n/wait_count are function-static, so this state
 * is shared across all callers — presumably only one coex state machine
 * runs at a time; confirm against the caller.
 */
static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
					      bool sco_hid, bool tx_pause,
					      u8 max_interval)
{
	/* adaptation state persists between calls (see NOTE above) */
	static s32 up, dn, m, n, wait_count;
	/*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
	s32 result;
	u8 retry_count = 0;

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
		  "[BTCoex], TdmaDurationAdjust()\n");

	if (!coex_dm->auto_tdma_adjust) {
		/* first run: pick the initial TDMA case from the
		 * (sco_hid, tx_pause, max_interval) combination
		 */
		coex_dm->auto_tdma_adjust = true;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
		if (sco_hid) {
			if (tx_pause) {
				if (max_interval == 1) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 13);
					coex_dm->tdma_adj_type = 13;
				} else if (max_interval == 2) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 14);
					coex_dm->tdma_adj_type = 14;
				} else if (max_interval == 3) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 15);
					coex_dm->tdma_adj_type = 15;
				} else {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 15);
					coex_dm->tdma_adj_type = 15;
				}
			} else {
				if (max_interval == 1) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 9);
					coex_dm->tdma_adj_type = 9;
				} else if (max_interval == 2) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 10);
					coex_dm->tdma_adj_type = 10;
				} else if (max_interval == 3) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 11);
					coex_dm->tdma_adj_type = 11;
				} else {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 11);
					coex_dm->tdma_adj_type = 11;
				}
			}
		} else {
			if (tx_pause) {
				if (max_interval == 1) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 5);
					coex_dm->tdma_adj_type = 5;
				} else if (max_interval == 2) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 6);
					coex_dm->tdma_adj_type = 6;
				} else if (max_interval == 3) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 7);
					coex_dm->tdma_adj_type = 7;
				} else {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 7);
					coex_dm->tdma_adj_type = 7;
				}
			} else {
				if (max_interval == 1) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 1);
					coex_dm->tdma_adj_type = 1;
				} else if (max_interval == 2) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 2);
					coex_dm->tdma_adj_type = 2;
				} else if (max_interval == 3) {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 3);
					coex_dm->tdma_adj_type = 3;
				} else {
					btc8723b2ant_ps_tdma(btcoexist,
							     NORMAL_EXEC,
							     true, 3);
					coex_dm->tdma_adj_type = 3;
				}
			}
		}

		/* reset the adaptation state */
		up = 0;
		dn = 0;
		m = 1;
		n = 3;
		result = 0;
		wait_count = 0;
	} else {
		/*accquire the BT TRx retry count from BT_Info byte2*/
		retry_count = coex_sta->bt_retry_cnt;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], retry_count = %d\n", retry_count);
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
			  up, dn, m, n, wait_count);
		result = 0;
		wait_count++;
		/* no retry in the last 2-second duration*/
		if (retry_count == 0) {
			/* BT is clean: after n consecutive clean periods,
			 * give WiFi a longer duration
			 */
			up++;
			dn--;

			if (dn <= 0)
				dn = 0;

			if (up >= n) {
				wait_count = 0;
				n = 3;
				up = 0;
				dn = 0;
				result = 1;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_TRACE_FW_DETAIL,
					  "[BTCoex], Increase wifi "
					  "duration!!\n");
			} /* <=3 retry in the last 2-second duration*/
		} else if (retry_count <= 3) {
			/* mild BT retries: shrink WiFi duration after two
			 * bad periods; n grows (up to 3*20) so later
			 * increases require longer clean streaks
			 */
			up--;
			dn++;

			if (up <= 0)
				up = 0;

			if (dn == 2) {
				if (wait_count <= 2)
					m++;
				else
					m = 1;

				if (m >= 20)
					m = 20;

				n = 3 * m;
				up = 0;
				dn = 0;
				wait_count = 0;
				result = -1;
				BTC_PRINT(BTC_MSG_ALGORITHM,
					  ALGO_TRACE_FW_DETAIL,
					  "[BTCoex], Decrease wifi duration "
					  "for retry_counter<3!!\n");
			}
		} else {
			/* heavy BT retries (>3): shrink WiFi duration
			 * immediately
			 */
			if (wait_count == 1)
				m++;
			else
				m = 1;

			if (m >= 20)
				m = 20;

			n = 3 * m;
			up = 0;
			dn = 0;
			wait_count = 0;
			result = -1;
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
				  "[BTCoex], Decrease wifi duration "
				  "for retry_counter>3!!\n");
		}

		/* apply the verdict via the interval-specific helper */
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], max Interval = %d\n", max_interval);
		if (max_interval == 1)
			set_tdma_int1(btcoexist, tx_pause, result);
		else if (max_interval == 2)
			set_tdma_int2(btcoexist, tx_pause, result);
		else if (max_interval == 3)
			set_tdma_int3(btcoexist, tx_pause, result);
	}

	/*if current PsTdma not match with the recorded one (when scan, dhcp..),
	 *then we have to adjust it back to the previous recorded one.
	 */
	if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
		bool scan = false, link = false, roam = false;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
			  "[BTCoex], PsTdma type dismatch!!!, "
			  "curPsTdma=%d, recordPsTdma=%d\n",
			  coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);

		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);

		if (!scan && !link && !roam)
			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
					     coex_dm->tdma_adj_type);
		else
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
				  "[BTCoex], roaming/link/scan is under"
				  " progress, will adjust next time!!!\n");
	}
}
2264
2265/* SCO only or SCO+PAN(HS) */
2266static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
2267{
2268 u8 wifi_rssi_state;
2269 u32 wifi_bw;
2270
2271 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2272 0, 2, 15, 0);
2273
2274 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2275
2276 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
2277
2278 if (btc8723b_need_dec_pwr(btcoexist))
2279 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2280 else
2281 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2282
2283 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2284
2285 /*for SCO quality at 11b/g mode*/
2286 if (BTC_WIFI_BW_LEGACY == wifi_bw)
2287 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
2288 else /*for SCO quality & wifi performance balance at 11n mode*/
2289 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
2290
2291 /*for voice quality */
2292 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
2293
2294 /* sw mechanism */
2295 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2296 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2297 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2298 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2299 false, false);
2300 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2301 true, 0x4);
2302 } else {
2303 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2304 false, false);
2305 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2306 true, 0x4);
2307 }
2308 } else {
2309 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2310 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2311 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2312 false, false);
2313 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2314 true, 0x4);
2315 } else {
2316 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2317 false, false);
2318 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2319 true, 0x4);
2320 }
2321 }
2322}
2323
2324static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
2325{
2326 u8 wifi_rssi_state, bt_rssi_state;
2327 u32 wifi_bw;
2328
2329 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2330 0, 2, 15, 0);
2331 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2332
2333 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2334
2335 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2336
2337 if (btc8723b_need_dec_pwr(btcoexist))
2338 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2339 else
2340 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2341
2342 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2343
2344 if (BTC_WIFI_BW_LEGACY == wifi_bw) /*/for HID at 11b/g mode*/
2345 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2346 else /*for HID quality & wifi performance balance at 11n mode*/
2347 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9);
2348
2349 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2350 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
2351 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
2352 else
2353 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
2354
2355 /* sw mechanism */
2356 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2357 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2358 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2359 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2360 false, false);
2361 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2362 false, 0x18);
2363 } else {
2364 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2365 false, false);
2366 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2367 false, 0x18);
2368 }
2369 } else {
2370 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2371 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2372 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2373 false, false);
2374 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2375 false, 0x18);
2376 } else {
2377 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2378 false, false);
2379 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2380 false, 0x18);
2381 }
2382 }
2383}
2384
2385/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/
2386static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
2387{
2388 u8 wifi_rssi_state, bt_rssi_state;
2389 u32 wifi_bw;
2390
2391 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2392 0, 2, 15, 0);
2393 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2394
2395 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2396
2397 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2398
2399 if (btc8723b_need_dec_pwr(btcoexist))
2400 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2401 else
2402 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2403
2404 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2405
2406 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2407 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
2408 btc8723b2ant_tdma_duration_adjust(btcoexist, false,
2409 false, 1);
2410 else
2411 btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1);
2412
2413 /* sw mechanism */
2414 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2415 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2416 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2417 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2418 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2419 false, false);
2420 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2421 false, 0x18);
2422 } else {
2423 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2424 false, false);
2425 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2426 false, 0x18);
2427 }
2428 } else {
2429 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2430 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2431 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2432 false, false);
2433 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2434 false, 0x18);
2435 } else {
2436 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2437 false, false);
2438 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2439 false, 0x18);
2440 }
2441 }
2442}
2443
2444static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
2445{
2446 u8 wifi_rssi_state;
2447 u32 wifi_bw;
2448
2449 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2450 0, 2, 15, 0);
2451
2452 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2453
2454 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2455
2456 if (btc8723b_need_dec_pwr(btcoexist))
2457 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2458 else
2459 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2460
2461 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2462
2463 btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
2464
2465 /* sw mechanism */
2466 btcoexist->btc_get(btcoexist,
2467 BTC_GET_U4_WIFI_BW, &wifi_bw);
2468 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2469 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2470 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2471 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2472 false, false);
2473 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2474 false, 0x18);
2475 } else {
2476 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2477 false, false);
2478 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2479 false, 0x18);
2480 }
2481 } else {
2482 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2483 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2484 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2485 false, false);
2486 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2487 false, 0x18);
2488 } else {
2489 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2490 false, false);
2491 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2492 false, 0x18);
2493 }
2494 }
2495}
2496
2497static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
2498{
2499 u8 wifi_rssi_state, bt_rssi_state;
2500 u32 wifi_bw;
2501
2502 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2503 0, 2, 15, 0);
2504 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2505
2506 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2507
2508 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2509
2510 if (btc8723b_need_dec_pwr(btcoexist))
2511 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2512 else
2513 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2514
2515 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10);
2516
2517 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2518 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
2519 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
2520 else
2521 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
2522
2523 /* sw mechanism */
2524 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2525 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2526 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2527 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2528 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2529 false, false);
2530 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2531 false, 0x18);
2532 } else {
2533 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2534 false, false);
2535 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2536 false, 0x18);
2537 }
2538 } else {
2539 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2540 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2541 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2542 false, false);
2543 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2544 false, 0x18);
2545 } else {
2546 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2547 false, false);
2548 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2549 false, 0x18);
2550 }
2551 }
2552}
2553
2554/*PAN(HS) only*/
2555static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
2556{
2557 u8 wifi_rssi_state;
2558 u32 wifi_bw;
2559
2560 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2561 0, 2, 15, 0);
2562
2563 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2564
2565 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2566
2567 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2568 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
2569 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2570 else
2571 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2572
2573 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2574
2575 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
2576
2577 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2578 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2579 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2580 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2581 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2582 false, false);
2583 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2584 false, 0x18);
2585 } else {
2586 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2587 false, false);
2588 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2589 false, 0x18);
2590 }
2591 } else {
2592 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2593 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2594 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2595 false, false);
2596 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2597 false, 0x18);
2598 } else {
2599 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2600 false, false);
2601 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2602 false, 0x18);
2603 }
2604 }
2605}
2606
2607/*PAN(EDR)+A2DP*/
2608static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
2609{
2610 u8 wifi_rssi_state, bt_rssi_state;
2611 u32 wifi_bw;
2612
2613 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2614 0, 2, 15, 0);
2615 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2616
2617 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2618
2619 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2620
2621 if (btc8723b_need_dec_pwr(btcoexist))
2622 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2623 else
2624 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2625
2626 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2627
2628 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2629 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2630 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12);
2631 if (BTC_WIFI_BW_HT40 == wifi_bw)
2632 btc8723b2ant_tdma_duration_adjust(btcoexist, false,
2633 true, 3);
2634 else
2635 btc8723b2ant_tdma_duration_adjust(btcoexist, false,
2636 false, 3);
2637 } else {
2638 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2639 btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
2640 }
2641
2642 /* sw mechanism */
2643 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2644 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2645 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2646 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2647 false, false);
2648 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2649 false, 0x18);
2650 } else {
2651 btc8723b2ant_sw_mechanism1(btcoexist, true, false,
2652 false, false);
2653 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2654 false, 0x18);
2655 }
2656 } else {
2657 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2658 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2659 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2660 false, false);
2661 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2662 false, 0x18);
2663 } else {
2664 btc8723b2ant_sw_mechanism1(btcoexist, false, false,
2665 false, false);
2666 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2667 false, 0x18);
2668 }
2669 }
2670}
2671
2672static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
2673{
2674 u8 wifi_rssi_state, bt_rssi_state;
2675 u32 wifi_bw;
2676
2677 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2678 0, 2, 15, 0);
2679 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2680 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2681
2682 if (btc8723b_need_dec_pwr(btcoexist))
2683 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2684 else
2685 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2686
2687 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2688 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2689 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2690 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
2691 3);
2692 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
2693 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
2694 0xfffff, 0x780);
2695 } else {
2696 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
2697 6);
2698 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2699 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
2700 0xfffff, 0x0);
2701 }
2702 btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
2703 } else {
2704 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2705 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
2706 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
2707 0x0);
2708 btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
2709 }
2710
2711 /* sw mechanism */
2712 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2713 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2714 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2715 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2716 false, false);
2717 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2718 false, 0x18);
2719 } else {
2720 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2721 false, false);
2722 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2723 false, 0x18);
2724 }
2725 } else {
2726 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2727 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2728 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2729 false, false);
2730 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2731 false, 0x18);
2732 } else {
2733 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2734 false, false);
2735 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2736 false, 0x18);
2737 }
2738 }
2739}
2740
2741/* HID+A2DP+PAN(EDR) */
2742static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
2743{
2744 u8 wifi_rssi_state, bt_rssi_state;
2745 u32 wifi_bw;
2746
2747 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2748 0, 2, 15, 0);
2749 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2750
2751 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2752
2753 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2754
2755 if (btc8723b_need_dec_pwr(btcoexist))
2756 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2757 else
2758 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2759
2760 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2761
2762 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2763
2764 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2765 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2766 if (BTC_WIFI_BW_HT40 == wifi_bw)
2767 btc8723b2ant_tdma_duration_adjust(btcoexist, true,
2768 true, 2);
2769 else
2770 btc8723b2ant_tdma_duration_adjust(btcoexist, true,
2771 false, 3);
2772 } else {
2773 btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
2774 }
2775
2776 /* sw mechanism */
2777 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2778 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2779 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2780 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2781 false, false);
2782 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2783 false, 0x18);
2784 } else {
2785 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2786 false, false);
2787 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2788 false, 0x18);
2789 }
2790 } else {
2791 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2792 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2793 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2794 false, false);
2795 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2796 false, 0x18);
2797 } else {
2798 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2799 false, false);
2800 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2801 false, 0x18);
2802 }
2803 }
2804}
2805
2806static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
2807{
2808 u8 wifi_rssi_state, bt_rssi_state;
2809 u32 wifi_bw;
2810
2811 wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
2812 0, 2, 15, 0);
2813 bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
2814
2815 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
2816
2817 btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
2818
2819 if (btc8723b_need_dec_pwr(btcoexist))
2820 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
2821 else
2822 btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
2823
2824 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
2825
2826 btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
2827
2828 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
2829 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
2830 btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
2831 else
2832 btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
2833
2834 /* sw mechanism */
2835 if (BTC_WIFI_BW_HT40 == wifi_bw) {
2836 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2837 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2838 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2839 false, false);
2840 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2841 false, 0x18);
2842 } else {
2843 btc8723b2ant_sw_mechanism1(btcoexist, true, true,
2844 false, false);
2845 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2846 false, 0x18);
2847 }
2848 } else {
2849 if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
2850 (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
2851 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2852 false, false);
2853 btc8723b2ant_sw_mechanism2(btcoexist, true, false,
2854 false, 0x18);
2855 } else {
2856 btc8723b2ant_sw_mechanism1(btcoexist, false, true,
2857 false, false);
2858 btc8723b2ant_sw_mechanism2(btcoexist, false, false,
2859 false, 0x18);
2860 }
2861 }
2862}
2863
2864static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
2865{
2866 u8 algorithm = 0;
2867
2868 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2869 "[BTCoex], RunCoexistMechanism()===>\n");
2870
2871 if (btcoexist->manual_control) {
2872 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2873 "[BTCoex], RunCoexistMechanism(), "
2874 "return for Manual CTRL <===\n");
2875 return;
2876 }
2877
2878 if (coex_sta->under_ips) {
2879 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2880 "[BTCoex], wifi is under IPS !!!\n");
2881 return;
2882 }
2883
2884 algorithm = btc8723b2ant_action_algorithm(btcoexist);
2885 if (coex_sta->c2h_bt_inquiry_page &&
2886 (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
2887 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2888 "[BTCoex], BT is under inquiry/page scan !!\n");
2889 btc8723b2ant_action_bt_inquiry(btcoexist);
2890 return;
2891 } else {
2892 if (coex_dm->need_recover_0x948) {
2893 coex_dm->need_recover_0x948 = false;
2894 btcoexist->btc_write_2byte(btcoexist, 0x948,
2895 coex_dm->backup_0x948);
2896 }
2897 }
2898
2899 coex_dm->cur_algorithm = algorithm;
2900 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
2901 coex_dm->cur_algorithm);
2902
2903 if (btc8723b2ant_is_common_action(btcoexist)) {
2904 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2905 "[BTCoex], Action 2-Ant common.\n");
2906 coex_dm->auto_tdma_adjust = false;
2907 } else {
2908 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
2909 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2910 "[BTCoex], preAlgorithm=%d, "
2911 "curAlgorithm=%d\n", coex_dm->pre_algorithm,
2912 coex_dm->cur_algorithm);
2913 coex_dm->auto_tdma_adjust = false;
2914 }
2915 switch (coex_dm->cur_algorithm) {
2916 case BT_8723B_2ANT_COEX_ALGO_SCO:
2917 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2918 "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
2919 btc8723b2ant_action_sco(btcoexist);
2920 break;
2921 case BT_8723B_2ANT_COEX_ALGO_HID:
2922 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2923 "[BTCoex], Action 2-Ant, algorithm = HID.\n");
2924 btc8723b2ant_action_hid(btcoexist);
2925 break;
2926 case BT_8723B_2ANT_COEX_ALGO_A2DP:
2927 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2928 "[BTCoex], Action 2-Ant, "
2929 "algorithm = A2DP.\n");
2930 btc8723b2ant_action_a2dp(btcoexist);
2931 break;
2932 case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
2933 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2934 "[BTCoex], Action 2-Ant, "
2935 "algorithm = A2DP+PAN(HS).\n");
2936 btc8723b2ant_action_a2dp_pan_hs(btcoexist);
2937 break;
2938 case BT_8723B_2ANT_COEX_ALGO_PANEDR:
2939 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2940 "[BTCoex], Action 2-Ant, "
2941 "algorithm = PAN(EDR).\n");
2942 btc8723b2ant_action_pan_edr(btcoexist);
2943 break;
2944 case BT_8723B_2ANT_COEX_ALGO_PANHS:
2945 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2946 "[BTCoex], Action 2-Ant, "
2947 "algorithm = HS mode.\n");
2948 btc8723b2ant_action_pan_hs(btcoexist);
2949 break;
2950 case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
2951 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2952 "[BTCoex], Action 2-Ant, "
2953 "algorithm = PAN+A2DP.\n");
2954 btc8723b2ant_action_pan_edr_a2dp(btcoexist);
2955 break;
2956 case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
2957 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2958 "[BTCoex], Action 2-Ant, "
2959 "algorithm = PAN(EDR)+HID.\n");
2960 btc8723b2ant_action_pan_edr_hid(btcoexist);
2961 break;
2962 case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
2963 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2964 "[BTCoex], Action 2-Ant, "
2965 "algorithm = HID+A2DP+PAN.\n");
2966 btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
2967 break;
2968 case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
2969 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2970 "[BTCoex], Action 2-Ant, "
2971 "algorithm = HID+A2DP.\n");
2972 btc8723b2ant_action_hid_a2dp(btcoexist);
2973 break;
2974 default:
2975 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
2976 "[BTCoex], Action 2-Ant, "
2977 "algorithm = coexist All Off!!\n");
2978 btc8723b2ant_coex_alloff(btcoexist);
2979 break;
2980 }
2981 coex_dm->pre_algorithm = coex_dm->cur_algorithm;
2982 }
2983}
2984
2985
2986
2987/*********************************************************************
2988 * work around function start with wa_btc8723b2ant_
2989 *********************************************************************/
2990/*********************************************************************
2991 * extern function start with EXbtc8723b2ant_
2992 *********************************************************************/
/*
 * One-time HW setup for 2-antenna BT coexistence.
 *
 * Backs up RF reg 0x1e, hands antenna control to WL/BT, forces GNT_BT
 * low, programs the external antenna switch (firmware-version dependent),
 * loads the default PTA coex table and enables the BT/WL counter
 * statistics.  Register write order is kept as-is; do not reorder.
 */
void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
	struct btc_board_info *board_info = &btcoexist->board_info;
	u32 u32tmp = 0, fw_ver;
	u8 u8tmp = 0;
	u8 h2c_parameter[2] = {0};

	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
		  "[BTCoex], 2Ant Init HW Config!!\n");

	/* backup rf 0x1e value — restored later by the coex machinery */
	coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist,
							      BTC_RF_A, 0x1e,
							      0xfffff);

	/* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
	u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
	u32tmp &= ~BIT23;
	u32tmp |= BIT24;
	btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);

	btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
	btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);

	/* Antenna switch control parameter */
	/* btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);*/

	/*Force GNT_BT to low*/
	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
	btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);

	/* 0x790[5:0]=0x5 — keep the upper two bits, set the low field */
	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
	u8tmp &= 0xc0;
	u8tmp |= 0x5;
	btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);

	/*Antenna config */
	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);

	/* ext switch for fw ver < 0xc00: older firmware cannot drive the
	 * external switch itself, so configure it from the driver side and
	 * tell firmware via H2C 0x65 whether the antenna is inverted
	 * (h2c[0]) and the switch type (h2c[1]).
	 */
	if (fw_ver < 0xc00) {
		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
							   0x3, 0x1);
			/*Main Ant to BT for IPS case 0x4c[23]=1*/
			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
							   0x1);

			/*tell firmware "no antenna inverse"*/
			h2c_parameter[0] = 0;
			h2c_parameter[1] = 1; /* ext switch type */
			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
						h2c_parameter);
		} else {
			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
							   0x3, 0x2);
			/*Aux Ant to BT for IPS case 0x4c[23]=1*/
			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
							   0x0);

			/*tell firmware "antenna inverse"*/
			h2c_parameter[0] = 1;
			h2c_parameter[1] = 1; /*ext switch type*/
			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
						h2c_parameter);
		}
	} else {
		/*ext switch always at s1 (if exist) */
		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
		/*Main Ant to BT for IPS case 0x4c[23]=1*/
		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1);

		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
			/*tell firmware "no antenna inverse"*/
			h2c_parameter[0] = 0;
			h2c_parameter[1] = 0; /*ext switch type*/
			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
						h2c_parameter);
		} else {
			/*tell firmware "antenna inverse"*/
			h2c_parameter[0] = 1;
			h2c_parameter[1] = 0; /*ext switch type*/
			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
						h2c_parameter);
		}
	}

	/* PTA parameter — coex table 0 is the power-on default */
	btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0);

	/* Enable counter statistics */
	/*0x76e[3] =1, WLAN_Act control by PTA*/
	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
	btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
}
3094
/*
 * Reset all coexistence dynamic-mechanism state.  Thin wrapper around
 * the internal init helper so the external notify API stays uniform.
 */
void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
		  "[BTCoex], Coex Mechanism Init!!\n");
	btc8723b2ant_init_coex_dm(btcoexist);
}
3101
3102void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
3103{
3104 struct btc_board_info *board_info = &btcoexist->board_info;
3105 struct btc_stack_info *stack_info = &btcoexist->stack_info;
3106 struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
3107 u8 *cli_buf = btcoexist->cli_buf;
3108 u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
3109 u32 u32tmp[4];
3110 bool roam = false, scan = false;
3111 bool link = false, wifi_under_5g = false;
3112 bool bt_hs_on = false, wifi_busy = false;
3113 s32 wifi_rssi = 0, bt_hs_rssi = 0;
3114 u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
3115 u8 wifi_dot11_chnl, wifi_hs_chnl;
3116 u32 fw_ver = 0, bt_patch_ver = 0;
3117
3118 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3119 "\r\n ============[BT Coexist info]============");
3120 CL_PRINTF(cli_buf);
3121
3122 if (btcoexist->manual_control) {
3123 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3124 "\r\n ==========[Under Manual Control]============");
3125 CL_PRINTF(cli_buf);
3126 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3127 "\r\n ==========================================");
3128 CL_PRINTF(cli_buf);
3129 }
3130
3131 if (!board_info->bt_exist) {
3132 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
3133 CL_PRINTF(cli_buf);
3134 return;
3135 }
3136
3137 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
3138 "Ant PG number/ Ant mechanism:",
3139 board_info->pg_ant_num, board_info->btdm_ant_num);
3140 CL_PRINTF(cli_buf);
3141
3142 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
3143 "BT stack/ hci ext ver",
3144 ((stack_info->profile_notified) ? "Yes" : "No"),
3145 stack_info->hci_version);
3146 CL_PRINTF(cli_buf);
3147
3148 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
3149 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
3150 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3151 "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
3152 "CoexVer/ FwVer/ PatchVer",
3153 glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
3154 fw_ver, bt_patch_ver, bt_patch_ver);
3155 CL_PRINTF(cli_buf);
3156
3157 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
3158 btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
3159 &wifi_dot11_chnl);
3160 btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
3161
3162 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
3163 "Dot11 channel / HsChnl(HsMode)",
3164 wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
3165 CL_PRINTF(cli_buf);
3166
3167 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
3168 "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
3169 coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
3170 CL_PRINTF(cli_buf);
3171
3172 btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
3173 btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
3174 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
3175 "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
3176 CL_PRINTF(cli_buf);
3177
3178 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
3179 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
3180 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
3181 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
3182 "Wifi link/ roam/ scan", link, roam, scan);
3183 CL_PRINTF(cli_buf);
3184
3185 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
3186 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
3187 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
3188 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
3189 &wifi_traffic_dir);
3190 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
3191 "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
3192 ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
3193 (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
3194 ((!wifi_busy) ? "idle" :
3195 ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
3196 "uplink" : "downlink")));
3197 CL_PRINTF(cli_buf);
3198
3199 CL_PRINTF(cli_buf);
3200
3201 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
3202 "SCO/HID/PAN/A2DP",
3203 bt_link_info->sco_exist, bt_link_info->hid_exist,
3204 bt_link_info->pan_exist, bt_link_info->a2dp_exist);
3205 CL_PRINTF(cli_buf);
3206 btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
3207
3208 bt_info_ext = coex_sta->bt_info_ext;
3209 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
3210 "BT Info A2DP rate",
3211 (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
3212 CL_PRINTF(cli_buf);
3213
3214 for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
3215 if (coex_sta->bt_info_c2h_cnt[i]) {
3216 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3217 "\r\n %-35s = %02x %02x %02x "
3218 "%02x %02x %02x %02x(%d)",
3219 glbt_info_src_8723b_2ant[i],
3220 coex_sta->bt_info_c2h[i][0],
3221 coex_sta->bt_info_c2h[i][1],
3222 coex_sta->bt_info_c2h[i][2],
3223 coex_sta->bt_info_c2h[i][3],
3224 coex_sta->bt_info_c2h[i][4],
3225 coex_sta->bt_info_c2h[i][5],
3226 coex_sta->bt_info_c2h[i][6],
3227 coex_sta->bt_info_c2h_cnt[i]);
3228 CL_PRINTF(cli_buf);
3229 }
3230 }
3231
3232 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
3233 "PS state, IPS/LPS",
3234 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
3235 ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
3236 CL_PRINTF(cli_buf);
3237 btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
3238
3239 /* Sw mechanism */
3240 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3241 "\r\n %-35s", "============[Sw mechanism]============");
3242 CL_PRINTF(cli_buf);
3243 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
3244 "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
3245 coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
3246 CL_PRINTF(cli_buf);
3247 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
3248 "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
3249 coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
3250 coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
3251 CL_PRINTF(cli_buf);
3252
3253 /* Fw mechanism */
3254 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
3255 "============[Fw mechanism]============");
3256 CL_PRINTF(cli_buf);
3257
3258 ps_tdma_case = coex_dm->cur_ps_tdma;
3259 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3260 "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
3261 "PS TDMA", coex_dm->ps_tdma_para[0],
3262 coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
3263 coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
3264 ps_tdma_case, coex_dm->auto_tdma_adjust);
3265 CL_PRINTF(cli_buf);
3266
3267 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
3268 "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
3269 coex_dm->cur_ignore_wlan_act);
3270 CL_PRINTF(cli_buf);
3271
3272 /* Hw setting */
3273 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
3274 "============[Hw setting]============");
3275 CL_PRINTF(cli_buf);
3276
3277 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
3278 "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
3279 CL_PRINTF(cli_buf);
3280
3281 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
3282 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
3283 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
3284 "0x778/0x880[29:25]", u8tmp[0],
3285 (u32tmp[0]&0x3e000000) >> 25);
3286 CL_PRINTF(cli_buf);
3287
3288 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
3289 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
3290 u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
3291 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
3292 "0x948/ 0x67[5] / 0x765",
3293 u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]);
3294 CL_PRINTF(cli_buf);
3295
3296 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
3297 u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
3298 u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
3299 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
3300 "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
3301 u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
3302 CL_PRINTF(cli_buf);
3303
3304
3305 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
3306 u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
3307 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
3308 u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
3309 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3310 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
3311 "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
3312 ((u8tmp[0] & 0x8)>>3), u8tmp[1],
3313 ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
3314 CL_PRINTF(cli_buf);
3315
3316 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
3317 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
3318 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
3319 "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
3320 CL_PRINTF(cli_buf);
3321
3322 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
3323 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
3324 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
3325 "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
3326 CL_PRINTF(cli_buf);
3327
3328 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
3329 u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
3330 u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
3331 u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
3332
3333 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
3334 u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
3335
3336 fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) +
3337 ((u32tmp[1]&0xffff0000) >> 16) +
3338 (u32tmp[1] & 0xffff) +
3339 (u32tmp[2] & 0xffff) +
3340 ((u32tmp[3]&0xffff0000) >> 16) +
3341 (u32tmp[3] & 0xffff);
3342 fa_cck = (u8tmp[0] << 8) + u8tmp[1];
3343
3344 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
3345 "OFDM-CCA/OFDM-FA/CCK-FA",
3346 u32tmp[0]&0xffff, fa_ofdm, fa_cck);
3347 CL_PRINTF(cli_buf);
3348
3349 u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
3350 u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
3351 u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
3352 u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
3353 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
3354 "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
3355 "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
3356 u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
3357 CL_PRINTF(cli_buf);
3358
3359 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
3360 "0x770(high-pri rx/tx)",
3361 coex_sta->high_priority_rx, coex_sta->high_priority_tx);
3362 CL_PRINTF(cli_buf);
3363 CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
3364 "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
3365 coex_sta->low_priority_tx);
3366 CL_PRINTF(cli_buf);
3367#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
3368 btc8723b2ant_monitor_bt_ctr(btcoexist);
3369#endif
3370 btcoexist->btc_disp_dbg_msg(btcoexist,
3371 BTC_DBG_DISP_COEX_STATISTICS);
3372}
3373
3374
3375void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3376{
3377 if (BTC_IPS_ENTER == type) {
3378 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3379 "[BTCoex], IPS ENTER notify\n");
3380 coex_sta->under_ips = true;
3381 btc8723b2ant_coex_alloff(btcoexist);
3382 } else if (BTC_IPS_LEAVE == type) {
3383 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3384 "[BTCoex], IPS LEAVE notify\n");
3385 coex_sta->under_ips = false;
3386 }
3387}
3388
3389void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3390{
3391 if (BTC_LPS_ENABLE == type) {
3392 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3393 "[BTCoex], LPS ENABLE notify\n");
3394 coex_sta->under_lps = true;
3395 } else if (BTC_LPS_DISABLE == type) {
3396 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3397 "[BTCoex], LPS DISABLE notify\n");
3398 coex_sta->under_lps = false;
3399 }
3400}
3401
3402void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
3403{
3404 if (BTC_SCAN_START == type)
3405 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3406 "[BTCoex], SCAN START notify\n");
3407 else if (BTC_SCAN_FINISH == type)
3408 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3409 "[BTCoex], SCAN FINISH notify\n");
3410}
3411
3412void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
3413{
3414 if (BTC_ASSOCIATE_START == type)
3415 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3416 "[BTCoex], CONNECT START notify\n");
3417 else if (BTC_ASSOCIATE_FINISH == type)
3418 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3419 "[BTCoex], CONNECT FINISH notify\n");
3420}
3421
3422void btc8723b_med_stat_notify(struct btc_coexist *btcoexist,
3423 u8 type)
3424{
3425 u8 h2c_parameter[3] = {0};
3426 u32 wifi_bw;
3427 u8 wifi_central_chnl;
3428
3429 if (BTC_MEDIA_CONNECT == type)
3430 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3431 "[BTCoex], MEDIA connect notify\n");
3432 else
3433 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3434 "[BTCoex], MEDIA disconnect notify\n");
3435
3436 /* only 2.4G we need to inform bt the chnl mask */
3437 btcoexist->btc_get(btcoexist,
3438 BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
3439 if ((BTC_MEDIA_CONNECT == type) &&
3440 (wifi_central_chnl <= 14)) {
3441 h2c_parameter[0] = 0x1;
3442 h2c_parameter[1] = wifi_central_chnl;
3443 btcoexist->btc_get(btcoexist,
3444 BTC_GET_U4_WIFI_BW, &wifi_bw);
3445 if (BTC_WIFI_BW_HT40 == wifi_bw)
3446 h2c_parameter[2] = 0x30;
3447 else
3448 h2c_parameter[2] = 0x20;
3449 }
3450
3451 coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
3452 coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
3453 coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
3454
3455 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
3456 "[BTCoex], FW write 0x66=0x%x\n",
3457 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
3458 h2c_parameter[2]);
3459
3460 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
3461}
3462
3463void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
3464 u8 type)
3465{
3466 if (type == BTC_PACKET_DHCP)
3467 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3468 "[BTCoex], DHCP Packet notify\n");
3469}
3470
/*
 * C2H BT-info report handler.
 *
 * Parses a BT info payload (tmpbuf[0] low nibble = report source,
 * tmpbuf[1] = status bitmap, further bytes = retry count / rssi / ext
 * flags when the report came from BT itself), updates the cached link
 * and busy state, and re-runs the coex mechanism.
 *
 * NOTE(review): bt_info is only captured when length >= 2 — presumably
 * guaranteed by the C2H format; confirm against the sender.
 */
void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
				       u8 *tmpbuf, u8 length)
{
	u8 bt_info = 0;
	u8 i, rsp_source = 0;
	bool bt_busy = false, limited_dig = false;
	bool wifi_connected = false;

	/* a report arrived, so any outstanding query is answered */
	coex_sta->c2h_bt_info_req_sent = false;

	/* clamp unknown sources onto the WIFI_FW slot */
	rsp_source = tmpbuf[0]&0xf;
	if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
		rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
	coex_sta->bt_info_c2h_cnt[rsp_source]++;

	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
		  "[BTCoex], Bt info[%d], length=%d, hex data=[",
		  rsp_source, length);
	/* cache the raw payload and dump it byte by byte */
	for (i = 0; i < length; i++) {
		coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
		if (i == 1)
			bt_info = tmpbuf[i];
		if (i == length-1)
			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
				  "0x%02x]\n", tmpbuf[i]);
		else
			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
				  "0x%02x, ", tmpbuf[i]);
	}

	if (btcoexist->manual_control) {
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], BtInfoNotify(), "
			  "return for Manual CTRL<===\n");
		return;
	}

	/* extended fields are only valid in reports that came from BT */
	if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
		coex_sta->bt_retry_cnt = /* [3:0]*/
			coex_sta->bt_info_c2h[rsp_source][2] & 0xf;

		/* raw value is scaled into the driver's dBm-like range */
		coex_sta->bt_rssi =
			coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;

		coex_sta->bt_info_ext =
			coex_sta->bt_info_c2h[rsp_source][4];

		/* Here we need to resend some wifi info to BT
		 * because bt is reset and loss of the info.
		 */
		if ((coex_sta->bt_info_ext & BIT1)) {
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
				  "[BTCoex], BT ext info bit1 check,"
				  " send wifi BW&Chnl to BT!!\n");
			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
					   &wifi_connected);
			if (wifi_connected)
				btc8723b_med_stat_notify(btcoexist,
							 BTC_MEDIA_CONNECT);
			else
				btc8723b_med_stat_notify(btcoexist,
							 BTC_MEDIA_DISCONNECT);
		}

		if ((coex_sta->bt_info_ext & BIT3)) {
			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
				  "[BTCoex], BT ext info bit3 check, "
				  "set BT NOT to ignore Wlan active!!\n");
			btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
						     false);
		} else {
			/* BT already NOT ignore Wlan active, do nothing here.*/
		}
#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
		if ((coex_sta->bt_info_ext & BIT4)) {
			/* BT auto report already enabled, do nothing*/
		} else {
			btc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
						    true);
		}
#endif
	}

	/* check BIT2 first ==> check if bt is under inquiry or page scan*/
	if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE)
		coex_sta->c2h_bt_inquiry_page = true;
	else
		coex_sta->c2h_bt_inquiry_page = false;

	/* set link exist status*/
	if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
		coex_sta->bt_link_exist = false;
		coex_sta->pan_exist = false;
		coex_sta->a2dp_exist = false;
		coex_sta->hid_exist = false;
		coex_sta->sco_exist = false;
	} else { /* connection exists */
		coex_sta->bt_link_exist = true;
		if (bt_info & BT_INFO_8723B_2ANT_B_FTP)
			coex_sta->pan_exist = true;
		else
			coex_sta->pan_exist = false;
		if (bt_info & BT_INFO_8723B_2ANT_B_A2DP)
			coex_sta->a2dp_exist = true;
		else
			coex_sta->a2dp_exist = false;
		if (bt_info & BT_INFO_8723B_2ANT_B_HID)
			coex_sta->hid_exist = true;
		else
			coex_sta->hid_exist = false;
		if (bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO)
			coex_sta->sco_exist = true;
		else
			coex_sta->sco_exist = false;
	}

	btc8723b2ant_update_bt_link_info(btcoexist);

	/* derive the coarse BT status used by the coex state machine;
	 * checks are ordered most-specific first
	 */
	if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], BtInfoNotify(), "
			  "BT Non-Connected idle!!!\n");
	/* connection exists but no busy */
	} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
	} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
		   (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
	} else if (bt_info & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
	} else {
		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
			  "[BTCoex], BtInfoNotify(), "
			  "BT Non-Defined state!!!\n");
	}

	/* any busy status limits wifi DIG and marks BT traffic busy */
	if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
	    (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
	    (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
		bt_busy = true;
		limited_dig = true;
	} else {
		bt_busy = false;
		limited_dig = false;
	}

	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);

	coex_dm->limited_dig = limited_dig;
	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);

	/* react to the new BT state immediately */
	btc8723b2ant_run_coexist_mechanism(btcoexist);
}
3632
3633void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
3634 u8 type)
3635{
3636 if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
3637 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3638 "[BTCoex],StackOP Inquiry/page/pair start notify\n");
3639 else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
3640 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
3641 "[BTCoex],StackOP Inquiry/page/pair finish notify\n");
3642}
3643
/*
 * Halt notification: wifi is going down.  Tell the BT firmware to ignore
 * wifi activity from now on, then report a media disconnect (which sends
 * an all-zero channel info to BT — see btc8723b_med_stat_notify).
 */
void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
{
	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");

	btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
	btc8723b_med_stat_notify(btcoexist, BTC_MEDIA_DISCONNECT);
}
3651
/*
 * Periodic coex maintenance callback.
 *
 * Prints version/board info on the first few invocations, then either
 * polls BT state directly (when auto report is disabled at build time)
 * or re-runs the coex mechanism when the wifi status changed or a TDMA
 * auto adjustment is in flight.
 */
void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
{
	struct btc_board_info *board_info = &btcoexist->board_info;
	struct btc_stack_info *stack_info = &btcoexist->stack_info;
	static u8 dis_ver_info_cnt;	/* limits the version banner to ~6 prints */
	u32 fw_ver = 0, bt_patch_ver = 0;

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
		  "[BTCoex], =========================="
		  "Periodical===========================\n");

	if (dis_ver_info_cnt <= 5) {
		dis_ver_info_cnt += 1;
		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
			  "[BTCoex], ****************************"
			  "************************************\n");
		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
			  "[BTCoex], Ant PG Num/ Ant Mech/ "
			  "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
			  board_info->btdm_ant_num, board_info->btdm_ant_pos);
		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
			  ((stack_info->profile_notified) ? "Yes" : "No"),
			  stack_info->hci_version);
		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
				   &bt_patch_ver);
		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
			  "[BTCoex], CoexVer/ FwVer/ PatchVer = "
			  "%d_%x/ 0x%x/ 0x%x(%d)\n",
			  glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
			  fw_ver, bt_patch_ver, bt_patch_ver);
		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
			  "[BTCoex], *****************************"
			  "***********************************\n");
	}

#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
	/* no auto report: poll BT info/counters/enable state every tick */
	btc8723b2ant_query_bt_info(btcoexist);
	btc8723b2ant_monitor_bt_ctr(btcoexist);
	btc8723b2ant_monitor_bt_enable_disable(btcoexist);
#else
	if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
	    coex_dm->auto_tdma_adjust)
		btc8723b2ant_run_coexist_mechanism(btcoexist);
#endif
}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
new file mode 100644
index 000000000000..e0ad8e545f82
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -0,0 +1,173 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25#ifndef _HAL8723B_2_ANT
26#define _HAL8723B_2_ANT
27
28/************************************************************************
29 * The following is for 8723B 2Ant BT Co-exist definition
30 ************************************************************************/
31#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1
32
33#define BT_INFO_8723B_2ANT_B_FTP BIT7
34#define BT_INFO_8723B_2ANT_B_A2DP BIT6
35#define BT_INFO_8723B_2ANT_B_HID BIT5
36#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4
37#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3
38#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2
39#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1
40#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0
41
42#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
43
/* Origin of a received BT-info report; indexes coex_sta's bt_info_c2h[][]. */
enum BT_INFO_SRC_8723B_2ANT {
	BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
	BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
	BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2,
	BT_INFO_SRC_8723B_2ANT_MAX
};

/* Coarse BT link/traffic state derived from BT-info reports. */
enum BT_8723B_2ANT_BT_STATUS {
	BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
	BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
	BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
	BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
	BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
	BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
	BT_8723B_2ANT_BT_STATUS_MAX
};

/* Which coexistence algorithm to run, keyed by the active BT profile mix. */
enum BT_8723B_2ANT_COEX_ALGO {
	BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
	BT_8723B_2ANT_COEX_ALGO_SCO = 0x1,
	BT_8723B_2ANT_COEX_ALGO_HID = 0x2,
	BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3,
	BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
	BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5,
	BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6,
	BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
	BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
	BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
	BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
	BT_8723B_2ANT_COEX_ALGO_MAX = 0xb,
};
75
/* Dynamic coex-mechanism state for the 8723B 2-antenna algorithm.
 * The pre_*/cur_* pairs hold the last-programmed and newly-requested
 * values of each knob so the algorithm can skip redundant updates.
 */
struct coex_dm_8723b_2ant {
	/* fw mechanism */
	bool pre_dec_bt_pwr;
	bool cur_dec_bt_pwr;
	u8 pre_fw_dac_swing_lvl;
	u8 cur_fw_dac_swing_lvl;
	bool cur_ignore_wlan_act;
	bool pre_ignore_wlan_act;
	u8 pre_ps_tdma;
	u8 cur_ps_tdma;
	u8 ps_tdma_para[5];
	u8 tdma_adj_type;
	bool reset_tdma_adjust;
	bool auto_tdma_adjust;
	bool pre_ps_tdma_on;
	bool cur_ps_tdma_on;
	bool pre_bt_auto_report;
	bool cur_bt_auto_report;

	/* sw mechanism */
	bool pre_rf_rx_lpf_shrink;
	bool cur_rf_rx_lpf_shrink;
	u32 bt_rf0x1e_backup;	/* saved BT RF reg 0x1e for later restore */
	bool pre_low_penalty_ra;
	bool cur_low_penalty_ra;
	bool pre_dac_swing_on;
	u32 pre_dac_swing_lvl;
	bool cur_dac_swing_on;
	u32 cur_dac_swing_lvl;
	bool pre_adc_back_off;
	bool cur_adc_back_off;
	bool pre_agc_table_en;
	bool cur_agc_table_en;
	u32 pre_val0x6c0;	/* coex table registers 0x6c0/0x6c4/0x6c8/0x6cc */
	u32 cur_val0x6c0;
	u32 pre_val0x6c4;
	u32 cur_val0x6c4;
	u32 pre_val0x6c8;
	u32 cur_val0x6c8;
	u8 pre_val0x6cc;
	u8 cur_val0x6cc;
	bool limited_dig;

	/* algorithm related */
	u8 pre_algorithm;
	u8 cur_algorithm;
	u8 bt_status;		/* one of BT_8723B_2ANT_BT_STATUS_* */
	u8 wifi_chnl_info[3];

	bool need_recover_0x948;
	u16 backup_0x948;	/* saved value of register 0x948 */
};
128
/* Snapshot of the BT side's link/profile/traffic state as last reported
 * through C2H BT-info events and stack notifications.
 */
struct coex_sta_8723b_2ant {
	bool bt_link_exist;
	bool sco_exist;
	bool a2dp_exist;
	bool hid_exist;
	bool pan_exist;

	bool under_lps;
	bool under_ips;
	u32 high_priority_tx;
	u32 high_priority_rx;
	u32 low_priority_tx;
	u32 low_priority_rx;
	u8 bt_rssi;
	u8 pre_bt_rssi_state;
	u8 pre_wifi_rssi_state[4];
	bool c2h_bt_info_req_sent;
	/* raw BT-info payload per source, see enum BT_INFO_SRC_8723B_2ANT */
	u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
	u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
	bool c2h_bt_inquiry_page;
	u8 bt_retry_cnt;
	u8 bt_info_ext;
};
152
153/*********************************************************************
154 * The following is interface which will notify coex module.
155 *********************************************************************/
156void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
157void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
158void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
159void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
160void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
161void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
162void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type);
163void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
164 u8 type);
165void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
166 u8 *tmpbuf, u8 length);
167void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
168 u8 type);
169void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
170void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
171void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
172
173#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
new file mode 100644
index 000000000000..b6722de64a31
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -0,0 +1,1011 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 ******************************************************************************/
25
26#include "halbt_precomp.h"
27
28/***********************************************
29 * Global variables
30 ***********************************************/
31
32struct btc_coexist gl_bt_coexist;
33
34u32 btc_dbg_type[BTC_MSG_MAX];
35static u8 btc_dbg_buf[100];
36
37/***************************************************
38 * Debug related function
39 ***************************************************/
40static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
41{
42 if (!btcoexist->binded || NULL == btcoexist->adapter)
43 return false;
44
45 return true;
46}
47
48static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
49{
50 if (rtlpriv->link_info.busytraffic)
51 return true;
52 else
53 return false;
54}
55
/* Initialize all BTC debug-message category masks to "everything off".
 * The commented-out OR lists document the per-category trace bits that
 * exist; developers enable them by uncommenting, so both masks resolve
 * to 0 in a default build.
 */
static void halbtc_dbg_init(void)
{
	u8 i;

	for (i = 0; i < BTC_MSG_MAX; i++)
		btc_dbg_type[i] = 0;

	btc_dbg_type[BTC_MSG_INTERFACE] =
/*			INTF_INIT				| */
/*			INTF_NOTIFY				| */
			0;

	btc_dbg_type[BTC_MSG_ALGORITHM] =
/*			ALGO_BT_RSSI_STATE			| */
/*			ALGO_WIFI_RSSI_STATE			| */
/*			ALGO_BT_MONITOR				| */
/*			ALGO_TRACE				| */
/*			ALGO_TRACE_FW				| */
/*			ALGO_TRACE_FW_DETAIL			| */
/*			ALGO_TRACE_FW_EXEC			| */
/*			ALGO_TRACE_SW				| */
/*			ALGO_TRACE_SW_DETAIL			| */
/*			ALGO_TRACE_SW_EXEC			| */
			0;
}
81
82static bool halbtc_is_bt40(struct rtl_priv *adapter)
83{
84 struct rtl_priv *rtlpriv = adapter;
85 struct rtl_phy *rtlphy = &(rtlpriv->phy);
86 bool is_ht40 = true;
87 enum ht_channel_width bw = rtlphy->current_chan_bw;
88
89 if (bw == HT_CHANNEL_WIDTH_20)
90 is_ht40 = false;
91 else if (bw == HT_CHANNEL_WIDTH_20_40)
92 is_ht40 = true;
93
94 return is_ht40;
95}
96
97static bool halbtc_legacy(struct rtl_priv *adapter)
98{
99 struct rtl_priv *rtlpriv = adapter;
100 struct rtl_mac *mac = rtl_mac(rtlpriv);
101
102 bool is_legacy = false;
103
104 if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
105 is_legacy = true;
106
107 return is_legacy;
108}
109
110bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
111{
112 struct rtl_priv *rtlpriv = adapter;
113
114 if (rtlpriv->link_info.tx_busy_traffic)
115 return true;
116 else
117 return false;
118}
119
120static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
121{
122 struct rtl_priv *rtlpriv =
123 (struct rtl_priv *)btcoexist->adapter;
124 u32 wifi_bw = BTC_WIFI_BW_HT20;
125
126 if (halbtc_is_bt40(rtlpriv)) {
127 wifi_bw = BTC_WIFI_BW_HT40;
128 } else {
129 if (halbtc_legacy(rtlpriv))
130 wifi_bw = BTC_WIFI_BW_LEGACY;
131 else
132 wifi_bw = BTC_WIFI_BW_HT20;
133 }
134 return wifi_bw;
135}
136
/* Current primary wifi channel; falls back to channel 1 while the PHY
 * has not programmed a channel yet (current_channel == 0).
 */
static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u8 chnl = 1;

	if (rtlphy->current_channel != 0)
		chnl = rtlphy->current_channel;
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
		  "static halbtc_get_wifi_central_chnl:%d\n", chnl);
	return chnl;
}
149
/* Take coex control of LPS and request that wifi leave LPS
 * (bt_ctrl_lps = true, bt_lps_on = false).  Skipped in AP mode where
 * LPS does not apply.
 */
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
	struct rtl_priv *rtlpriv;
	struct rtl_ps_ctl *ppsc;	/* NOTE(review): fetched but unused */
	bool ap_enable = false;

	rtlpriv = btcoexist->adapter;
	ppsc = rtl_psc(rtlpriv);

	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
			   &ap_enable);

	if (ap_enable) {
		pr_info("halbtc_leave_lps()<--dont leave lps under AP mode\n");
		return;
	}

	btcoexist->bt_info.bt_ctrl_lps = true;
	btcoexist->bt_info.bt_lps_on = false;
}
170
171static void halbtc_enter_lps(struct btc_coexist *btcoexist)
172{
173 struct rtl_priv *rtlpriv;
174 struct rtl_ps_ctl *ppsc;
175 bool ap_enable = false;
176
177 rtlpriv = btcoexist->adapter;
178 ppsc = rtl_psc(rtlpriv);
179
180 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
181 &ap_enable);
182
183 if (ap_enable) {
184 pr_info("halbtc_enter_lps()<--dont enter lps under AP mode\n");
185 return;
186 }
187
188 btcoexist->bt_info.bt_ctrl_lps = true;
189 btcoexist->bt_info.bt_lps_on = false;
190}
191
192static void halbtc_normal_lps(struct btc_coexist *btcoexist)
193{
194 if (btcoexist->bt_info.bt_ctrl_lps) {
195 btcoexist->bt_info.bt_lps_on = false;
196 btcoexist->bt_info.bt_ctrl_lps = false;
197 }
198}
199
/* Low-power / aggregation hooks: intentionally empty placeholders.
 * NOTE(review): "nomal" in halbtc_nomal_low_power is a typo for
 * "normal", but the name is referenced by the notify paths below, so
 * renaming would have to touch all callers at once.
 */
static void halbtc_leave_low_power(void)
{
}

static void halbtc_nomal_low_power(void)
{
}

static void halbtc_disable_low_power(void)
{
}

static void halbtc_aggregation_check(void)
{
}

/* Stub: BT patch-version query not wired up yet; callers always get 0. */
static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
{
	return 0;
}
220
/* Wifi RSSI proxy: the undecorated smoothed PWDB of the current link.
 * NOTE(review): both branches read the same field; the "associated
 * entry pwdb" else-branch looks like a placeholder for a per-station
 * lookup - confirm intent before simplifying.
 */
static s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
{
	struct rtl_priv *rtlpriv = adapter;
	s32 undec_sm_pwdb = 0;

	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
	else /* associated entry pwdb */
		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
	return undec_sm_pwdb;
}
232
233static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
234{
235 struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
236 struct rtl_priv *rtlpriv = btcoexist->adapter;
237 struct rtl_phy *rtlphy = &(rtlpriv->phy);
238 struct rtl_mac *mac = rtl_mac(rtlpriv);
239 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
240 bool *bool_tmp = (bool *)out_buf;
241 int *s32_tmp = (int *)out_buf;
242 u32 *u32_tmp = (u32 *)out_buf;
243 u8 *u8_tmp = (u8 *)out_buf;
244 bool tmp = false;
245
246 if (!halbtc_is_bt_coexist_available(btcoexist))
247 return false;
248
249 switch (get_type) {
250 case BTC_GET_BL_HS_OPERATION:
251 *bool_tmp = false;
252 break;
253 case BTC_GET_BL_HS_CONNECTING:
254 *bool_tmp = false;
255 break;
256 case BTC_GET_BL_WIFI_CONNECTED:
257 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
258 tmp = true;
259 *bool_tmp = tmp;
260 break;
261 case BTC_GET_BL_WIFI_BUSY:
262 if (halbtc_is_wifi_busy(rtlpriv))
263 *bool_tmp = true;
264 else
265 *bool_tmp = false;
266 break;
267 case BTC_GET_BL_WIFI_SCAN:
268 if (mac->act_scanning)
269 *bool_tmp = true;
270 else
271 *bool_tmp = false;
272 break;
273 case BTC_GET_BL_WIFI_LINK:
274 if (mac->link_state == MAC80211_LINKING)
275 *bool_tmp = true;
276 else
277 *bool_tmp = false;
278 break;
279 case BTC_GET_BL_WIFI_ROAM: /*TODO*/
280 if (mac->link_state == MAC80211_LINKING)
281 *bool_tmp = true;
282 else
283 *bool_tmp = false;
284 break;
285 case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/
286 *bool_tmp = false;
287
288 break;
289 case BTC_GET_BL_WIFI_UNDER_5G:
290 *bool_tmp = false; /*TODO*/
291
292 case BTC_GET_BL_WIFI_DHCP: /*TODO*/
293 break;
294 case BTC_GET_BL_WIFI_SOFTAP_IDLE:
295 *bool_tmp = true;
296 break;
297 case BTC_GET_BL_WIFI_SOFTAP_LINKING:
298 *bool_tmp = false;
299 break;
300 case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
301 *bool_tmp = false;
302 break;
303 case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
304 *bool_tmp = false;
305 break;
306 case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
307 if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
308 *bool_tmp = false;
309 else
310 *bool_tmp = true;
311 break;
312 case BTC_GET_BL_WIFI_UNDER_B_MODE:
313 *bool_tmp = false; /*TODO*/
314 break;
315 case BTC_GET_BL_EXT_SWITCH:
316 *bool_tmp = false;
317 break;
318 case BTC_GET_S4_WIFI_RSSI:
319 *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
320 break;
321 case BTC_GET_S4_HS_RSSI: /*TODO*/
322 *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
323 break;
324 case BTC_GET_U4_WIFI_BW:
325 *u32_tmp = halbtc_get_wifi_bw(btcoexist);
326 break;
327 case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
328 if (halbtc_is_wifi_uplink(rtlpriv))
329 *u32_tmp = BTC_WIFI_TRAFFIC_TX;
330 else
331 *u32_tmp = BTC_WIFI_TRAFFIC_RX;
332 break;
333 case BTC_GET_U4_WIFI_FW_VER:
334 *u32_tmp = rtlhal->fw_version;
335 break;
336 case BTC_GET_U4_BT_PATCH_VER:
337 *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
338 break;
339 case BTC_GET_U1_WIFI_DOT11_CHNL:
340 *u8_tmp = rtlphy->current_channel;
341 break;
342 case BTC_GET_U1_WIFI_CENTRAL_CHNL:
343 *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
344 break;
345 case BTC_GET_U1_WIFI_HS_CHNL:
346 *u8_tmp = 1;/*BT_OperateChnl(rtlpriv);*/
347 break;
348 case BTC_GET_U1_MAC_PHY_MODE:
349 *u8_tmp = BTC_MP_UNKNOWN;
350 break;
351
352 /************* 1Ant **************/
353 case BTC_GET_U1_LPS_MODE:
354 *u8_tmp = btcoexist->pwr_mode_val[0];
355 break;
356
357 default:
358 break;
359 }
360
361 return true;
362}
363
/* Generic "set" dispatcher exposed to the coex algorithm modules.
 * @void_btcoexist: the struct btc_coexist context (void * per op table)
 * @set_type: a BTC_SET_* selector
 * @in_buf: caller buffer read as bool/u8/u32 per the selector's prefix;
 *	for BTC_SET_ACT_* actions it may be unused
 * Returns false when the coex context is not bound yet, true otherwise.
 */
static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
	bool *bool_tmp = (bool *)in_buf;
	u8 *u8_tmp = (u8 *)in_buf;
	u32 *u32_tmp = (u32 *)in_buf;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return false;

	switch (set_type) {
	/* set some bool type variables. */
	case BTC_SET_BL_BT_DISABLE:
		btcoexist->bt_info.bt_disabled = *bool_tmp;
		break;
	case BTC_SET_BL_BT_TRAFFIC_BUSY:
		btcoexist->bt_info.bt_busy = *bool_tmp;
		break;
	case BTC_SET_BL_BT_LIMITED_DIG:
		btcoexist->bt_info.limited_dig = *bool_tmp;
		break;
	case BTC_SET_BL_FORCE_TO_ROAM:
		btcoexist->bt_info.force_to_roam = *bool_tmp;
		break;
	case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
		btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
		break;
	case BTC_SET_BL_BT_CTRL_AGG_SIZE:
		btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
		break;
	case BTC_SET_BL_INC_SCAN_DEV_NUM:
		btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
		break;
	/* set some u1Byte type variables. */
	case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
		btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
		break;
	case BTC_SET_U1_AGG_BUF_SIZE:
		btcoexist->bt_info.agg_buf_size = *u8_tmp;
		break;
	/* the following are some action which will be triggered */
	case BTC_SET_ACT_GET_BT_RSSI:
		/*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
		break;
	case BTC_SET_ACT_AGGREGATE_CTRL:
		halbtc_aggregation_check();
		break;

	/* 1Ant */
	case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
		btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
		break;
	case BTC_SET_UI_SCAN_SIG_COMPENSATION:
		/* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */
		break;
	case BTC_SET_U1_1ANT_LPS:
		btcoexist->bt_info.lps_1ant = *u8_tmp;
		break;
	case BTC_SET_U1_1ANT_RPWM:
		btcoexist->bt_info.rpwm_1ant = *u8_tmp;
		break;
	/* the following are some action which will be triggered */
	case BTC_SET_ACT_LEAVE_LPS:
		halbtc_leave_lps(btcoexist);
		break;
	case BTC_SET_ACT_ENTER_LPS:
		halbtc_enter_lps(btcoexist);
		break;
	case BTC_SET_ACT_NORMAL_LPS:
		halbtc_normal_lps(btcoexist);
		break;
	case BTC_SET_ACT_DISABLE_LOW_POWER:
		halbtc_disable_low_power();
		break;
	case BTC_SET_ACT_UPDATE_ra_mask:
		btcoexist->bt_info.ra_mask = *u32_tmp;
		break;
	case BTC_SET_ACT_SEND_MIMO_PS:
		break;
	case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
		btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
		break;
	case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
		break;
	case BTC_SET_ACT_CTRL_BT_COEX:
		break;
	default:
		break;
	}

	return true;
}
456
/* Debug-display helpers: intentionally empty placeholders, dispatched
 * from halbtc_display_dbg_msg() below.
 */
static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
{
}

static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
{
}

static void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
{
}

static void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
{
}
472
473/************************************************************
474 * IO related function
475 ************************************************************/
/* Register accessors bridging the abstract btc_coexist op table to the
 * rtlwifi read/write helpers; bt_context is always a struct btc_coexist.
 */
static u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	return rtl_read_byte(rtlpriv, reg_addr);
}

static u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	return rtl_read_word(rtlpriv, reg_addr);
}

static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	return rtl_read_dword(rtlpriv, reg_addr);
}

static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtl_write_byte(rtlpriv, reg_addr, data);
}
507
/* Read-modify-write of one byte register restricted to bit_mask.
 * The loop finds the mask's lowest set bit so callers pass data
 * right-aligned; a bit_mask of MASKDWORD means "write data as-is".
 * NOTE(review): a zero bit_mask (other than MASKDWORD) leaves
 * bit_shift at 8 and writes the original value back - presumably
 * callers never pass 0, verify.
 */
static void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
				       u32 bit_mask, u8 data)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	u8 original_value, bit_shift = 0;
	u8 i;

	if (bit_mask != MASKDWORD) {/*if not "double word" write*/
		original_value = rtl_read_byte(rtlpriv, reg_addr);
		for (i = 0; i <= 7; i++) {
			if ((bit_mask>>i) & 0x1)
				break;
		}
		bit_shift = i;
		data = (original_value & (~bit_mask)) |
			((data << bit_shift) & bit_mask);
	}
	rtl_write_byte(rtlpriv, reg_addr, data);
}
528
/* Remaining MAC/BB/RF accessors and the H2C command hook for the op
 * table; all delegate straight to the rtlwifi core helpers.
 */
static void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtl_write_word(rtlpriv, reg_addr, data);
}

static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
{
	struct btc_coexist *btcoexist =
		(struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtl_write_dword(rtlpriv, reg_addr, data);
}

static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
			     u32 data)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
}

static u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
}

static void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
			     u32 bit_mask, u32 data)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
}

static u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
			    u32 bit_mask)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
}

/* Forward a host-to-chip (H2C) firmware command through the HAL ops. */
static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
				u32 cmd_len, u8 *cmd_buf)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
					cmd_len, cmd_buf);
}
590
/* Dispatch a BTC_DBG_DISP_* request to the matching display helper
 * (all currently stubs); unknown types are silently ignored.
 */
static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
{
	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
	switch (disp_type) {
	case BTC_DBG_DISP_COEX_STATISTICS:
		halbtc_display_coex_statistics(btcoexist);
		break;
	case BTC_DBG_DISP_BT_LINK_INFO:
		halbtc_display_bt_link_info(btcoexist);
		break;
	case BTC_DBG_DISP_BT_FW_VER:
		halbtc_display_bt_fw_info(btcoexist);
		break;
	case BTC_DBG_DISP_FW_PWR_MODE_CMD:
		halbtc_display_fw_pwr_mode_cmd(btcoexist);
		break;
	default:
		break;
	}
}
611
612/*****************************************************************
613 * Extern functions called by other module
614 *****************************************************************/
/* One-time binding of the global coex context to an adapter: fills in
 * the op table, detects the bus interface and seeds BT-info defaults.
 * Returns false if the context was already bound (no re-registration).
 * NOTE(review): the public name's "initlize" spelling is a typo, but it
 * is the exported interface and must stay; cnt_bind is also incremented
 * even on the already-bound early return.
 */
bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
{
	struct btc_coexist *btcoexist = &gl_bt_coexist;

	btcoexist->statistics.cnt_bind++;

	halbtc_dbg_init();

	if (btcoexist->binded)
		return false;
	else
		btcoexist->binded = true;

	/* Bus type is fixed at build time by the HCI config option. */
#if (defined(CONFIG_PCI_HCI))
	btcoexist->chip_interface = BTC_INTF_PCI;
#elif (defined(CONFIG_USB_HCI))
	btcoexist->chip_interface = BTC_INTF_USB;
#elif (defined(CONFIG_SDIO_HCI))
	btcoexist->chip_interface = BTC_INTF_SDIO;
#elif (defined(CONFIG_GSPI_HCI))
	btcoexist->chip_interface = BTC_INTF_GSPI;
#else
	btcoexist->chip_interface = BTC_INTF_UNKNOWN;
#endif

	/* Keep the first adapter that binds; never overwrite it. */
	if (NULL == btcoexist->adapter)
		btcoexist->adapter = adapter;

	btcoexist->stack_info.profile_notified = false;

	btcoexist->btc_read_1byte = halbtc_read_1byte;
	btcoexist->btc_write_1byte = halbtc_write_1byte;
	btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
	btcoexist->btc_read_2byte = halbtc_read_2byte;
	btcoexist->btc_write_2byte = halbtc_write_2byte;
	btcoexist->btc_read_4byte = halbtc_read_4byte;
	btcoexist->btc_write_4byte = halbtc_write_4byte;

	btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
	btcoexist->btc_get_bb_reg = halbtc_get_bbreg;

	btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
	btcoexist->btc_get_rf_reg = halbtc_get_rfreg;

	btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
	btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;

	btcoexist->btc_get = halbtc_get;
	btcoexist->btc_set = halbtc_set;

	btcoexist->cli_buf = &btc_dbg_buf[0];

	btcoexist->bt_info.b_bt_ctrl_buf_size = false;
	btcoexist->bt_info.agg_buf_size = 5;

	btcoexist->bt_info.increase_scan_dev_num = false;
	return true;
}
673
674void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
675{
676 struct rtl_priv *rtlpriv = btcoexist->adapter;
677 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
678
679 if (!halbtc_is_bt_coexist_available(btcoexist))
680 return;
681
682 btcoexist->statistics.cnt_init_hw_config++;
683
684 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
685 ex_halbtc8723b2ant_init_hwconfig(btcoexist);
686}
687
688void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
689{
690 struct rtl_priv *rtlpriv = btcoexist->adapter;
691 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
692
693 if (!halbtc_is_bt_coexist_available(btcoexist))
694 return;
695
696 btcoexist->statistics.cnt_init_coex_dm++;
697
698 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
699 ex_halbtc8723b2ant_init_coex_dm(btcoexist);
700
701 btcoexist->initilized = true;
702}
703
/* Inactive-power-save transition hook: ERFOFF (radio going off) maps to
 * IPS entry, anything else to IPS leave.  Skipped entirely while the
 * user has manual control of coex.
 */
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 ips_type;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_ips_notify++;
	if (btcoexist->manual_control)
		return;

	if (ERFOFF == type)
		ips_type = BTC_IPS_ENTER;
	else
		ips_type = BTC_IPS_LEAVE;

	/* Bracket the backend call with the low-power hooks (stubs today). */
	halbtc_leave_low_power();

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
		ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);

	halbtc_nomal_low_power();
}
728
/* Leisure-power-save transition hook: EACTIVE (leaving sleep) disables
 * LPS handling in the backend, anything else enables it.  Skipped while
 * the user has manual control of coex.
 */
void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 lps_type;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_lps_notify++;
	if (btcoexist->manual_control)
		return;

	if (EACTIVE == type)
		lps_type = BTC_LPS_DISABLE;
	else
		lps_type = BTC_LPS_ENABLE;

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
		ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
}
749
/* Scan start/stop hook: non-zero type means a scan is starting.
 * Skipped while the user has manual control of coex.
 */
void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 scan_type;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_scan_notify++;
	if (btcoexist->manual_control)
		return;

	if (type)
		scan_type = BTC_SCAN_START;
	else
		scan_type = BTC_SCAN_FINISH;

	/* Bracket the backend call with the low-power hooks (stubs today). */
	halbtc_leave_low_power();

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
		ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);

	halbtc_nomal_low_power();
}
774
775void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
776{
777 struct rtl_priv *rtlpriv = btcoexist->adapter;
778 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
779 u8 asso_type;
780
781 if (!halbtc_is_bt_coexist_available(btcoexist))
782 return;
783 btcoexist->statistics.cnt_connect_notify++;
784 if (btcoexist->manual_control)
785 return;
786
787 if (action)
788 asso_type = BTC_ASSOCIATE_START;
789 else
790 asso_type = BTC_ASSOCIATE_FINISH;
791
792 halbtc_leave_low_power();
793
794 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
795 ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
796}
797
/* Media connect/disconnect hook, translating the rtlwifi media status
 * into the BTC_MEDIA_* code expected by the backend.  Skipped while the
 * user has manual control of coex.
 */
void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
				 enum _RT_MEDIA_STATUS media_status)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 status;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_media_status_notify++;
	if (btcoexist->manual_control)
		return;

	if (RT_MEDIA_CONNECT == media_status)
		status = BTC_MEDIA_CONNECT;
	else
		status = BTC_MEDIA_DISCONNECT;

	halbtc_leave_low_power();

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
		btc8723b_med_stat_notify(btcoexist, status);

	halbtc_nomal_low_power();
}
823
/* Special-packet (e.g. DHCP/EAPOL) hook.  Skipped while the user has
 * manual control of coex.
 * NOTE(review): the pkt_type argument is currently ignored and every
 * packet is reported to the backend as BTC_PACKET_DHCP - confirm this
 * is intentional.
 */
void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 packet_type;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_special_packet_notify++;
	if (btcoexist->manual_control)
		return;

	packet_type = BTC_PACKET_DHCP;

	halbtc_leave_low_power();

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
		ex_halbtc8723b2ant_special_packet_notify(btcoexist,
							 packet_type);

	halbtc_nomal_low_power();
}
846
847void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
848 u8 *tmp_buf, u8 length)
849{
850 struct rtl_priv *rtlpriv = btcoexist->adapter;
851 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
852 if (!halbtc_is_bt_coexist_available(btcoexist))
853 return;
854 btcoexist->statistics.cnt_bt_info_notify++;
855
856 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
857 ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
858}
859
/* BT stack operation hook.  Skipped while the user has manual control
 * of coex.
 * NOTE(review): the type argument is currently ignored; every event is
 * forwarded as BTC_STACK_OP_NONE - confirm intended.
 */
void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 stack_op_type;

	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_stack_operation_notify++;
	if (btcoexist->manual_control)
		return;

	stack_op_type = BTC_STACK_OP_NONE;

	halbtc_leave_low_power();

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
		ex_halbtc8723b2ant_stack_operation_notify(btcoexist,
							  stack_op_type);

	halbtc_nomal_low_power();
}
882
883void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
884{
885 struct rtl_priv *rtlpriv = btcoexist->adapter;
886 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
887 if (!halbtc_is_bt_coexist_available(btcoexist))
888 return;
889
890 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
891 ex_halbtc8723b2ant_halt_notify(btcoexist);
892}
893
/* PnP (suspend/resume) hook: currently only validates the context;
 * pnp_state handling is not implemented yet.
 */
void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
}
899
900void exhalbtc_periodical(struct btc_coexist *btcoexist)
901{
902 struct rtl_priv *rtlpriv = btcoexist->adapter;
903 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
904 if (!halbtc_is_bt_coexist_available(btcoexist))
905 return;
906 btcoexist->statistics.cnt_periodical++;
907
908 halbtc_leave_low_power();
909
910 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
911 ex_halbtc8723b2ant_periodical(btcoexist);
912
913 halbtc_nomal_low_power();
914}
915
/* Debug-control entry point: only counts invocations for now; the
 * code/len/data payload is not interpreted yet.
 */
void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
			  u8 code, u8 len, u8 *data)
{
	if (!halbtc_is_bt_coexist_available(btcoexist))
		return;
	btcoexist->statistics.cnt_dbg_ctrl++;
}

/* Stub: BT stack profile updates are not consumed yet. */
void exhalbtc_stack_update_profile_info(void)
{
}
927
928void exhalbtc_update_min_bt_rssi(char bt_rssi)
929{
930 struct btc_coexist *btcoexist = &gl_bt_coexist;
931
932 if (!halbtc_is_bt_coexist_available(btcoexist))
933 return;
934
935 btcoexist->stack_info.min_bt_rssi = bt_rssi;
936}
937
938void exhalbtc_set_hci_version(u16 hci_version)
939{
940 struct btc_coexist *btcoexist = &gl_bt_coexist;
941
942 if (!halbtc_is_bt_coexist_available(btcoexist))
943 return;
944
945 btcoexist->stack_info.hci_version = hci_version;
946}
947
948void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
949{
950 struct btc_coexist *btcoexist = &gl_bt_coexist;
951
952 if (!halbtc_is_bt_coexist_available(btcoexist))
953 return;
954
955 btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
956 btcoexist->bt_info.bt_hci_ver = bt_hci_version;
957}
958
/* Record whether a BT device is physically present on the board. */
void exhalbtc_set_bt_exist(bool bt_exist)
{
	gl_bt_coexist.board_info.bt_exist = bt_exist;
}
963
964void exhalbtc_set_chip_type(u8 chip_type)
965{
966 switch (chip_type) {
967 default:
968 case BT_2WIRE:
969 case BT_ISSC_3WIRE:
970 case BT_ACCEL:
971 case BT_RTL8756:
972 gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
973 break;
974 case BT_CSR_BC4:
975 gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
976 break;
977 case BT_CSR_BC8:
978 gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
979 break;
980 case BT_RTL8723A:
981 gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
982 break;
983 case BT_RTL8821A:
984 gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
985 break;
986 case BT_RTL8723B:
987 gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
988 break;
989 }
990}
991
992void exhalbtc_set_ant_num(u8 type, u8 ant_num)
993{
994 if (BT_COEX_ANT_TYPE_PG == type) {
995 gl_bt_coexist.board_info.pg_ant_num = ant_num;
996 gl_bt_coexist.board_info.btdm_ant_num = ant_num;
997 } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
998 gl_bt_coexist.board_info.btdm_ant_num = ant_num;
999 }
1000}
1001
1002void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
1003{
1004 struct rtl_priv *rtlpriv = btcoexist->adapter;
1005 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1006 if (!halbtc_is_bt_coexist_available(btcoexist))
1007 return;
1008
1009 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
1010 ex_halbtc8723b2ant_display_coex_info(btcoexist);
1011}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
new file mode 100644
index 000000000000..871fc3c6d559
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -0,0 +1,559 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25#ifndef __HALBTC_OUT_SRC_H__
26#define __HALBTC_OUT_SRC_H__
27
28#include "../wifi.h"
29
/* Boolean "force" argument values for the coex exec-style helpers. */
#define NORMAL_EXEC false
#define FORCE_EXEC true

/* BTC RF-path aliases for the driver's RF90 path identifiers. */
#define BTC_RF_A RF90_PATH_A
#define BTC_RF_B RF90_PATH_B
#define BTC_RF_C RF90_PATH_C
#define BTC_RF_D RF90_PATH_D

/* MAC/PHY topology aliases (single/dual MAC and PHY combinations). */
#define BTC_SMSP SINGLEMAC_SINGLEPHY
#define BTC_DMDP DUALMAC_DUALPHY
#define BTC_DMSP DUALMAC_SINGLEPHY
#define BTC_MP_UNKNOWN 0xff

/* Decorative parameter-direction markers; expand to nothing. */
#define IN
#define OUT

/* Scratch buffer size used by the coex debug/display code. */
#define BT_TMP_BUF_SIZE 100

/* Source of the antenna count passed to exhalbtc_set_ant_num(). */
#define BT_COEX_ANT_TYPE_PG 0
#define BT_COEX_ANT_TYPE_ANTDIV 1
#define BT_COEX_ANT_TYPE_DETECTED 2

/* MIMO power-save modes. */
#define BTC_MIMO_PS_STATIC 0
#define BTC_MIMO_PS_DYNAMIC 1

#define BTC_RATE_DISABLE 0
#define BTC_RATE_ENABLE 1

/* Which side currently owns the shared antenna path. */
#define BTC_ANT_PATH_WIFI 0
#define BTC_ANT_PATH_BT 1
#define BTC_ANT_PATH_PTA 2
/* Host bus interface the combo WiFi/BT chip is attached through. */
enum btc_chip_interface {
	BTC_INTF_UNKNOWN = 0,
	BTC_INTF_PCI = 1,
	BTC_INTF_USB = 2,
	BTC_INTF_SDIO = 3,
	BTC_INTF_GSPI = 4,
	BTC_INTF_MAX
};

/* BTC-internal BT chip identifier (set via exhalbtc_set_chip_type()). */
enum BTC_CHIP_TYPE {
	BTC_CHIP_UNDEF = 0,
	BTC_CHIP_CSR_BC4 = 1,
	BTC_CHIP_CSR_BC8 = 2,
	BTC_CHIP_RTL8723A = 3,
	BTC_CHIP_RTL8821 = 4,
	BTC_CHIP_RTL8723B = 5,
	BTC_CHIP_MAX
};

/* Debug-message categories; each indexes a flag word in btc_dbg_type[]. */
enum BTC_MSG_TYPE {
	BTC_MSG_INTERFACE = 0x0,
	BTC_MSG_ALGORITHM = 0x1,
	BTC_MSG_MAX
};
/* Per-category debug-flag masks, indexed by enum BTC_MSG_TYPE. */
extern u32 btc_dbg_type[];
87
/* following is for BTC_MSG_INTERFACE */
#define INTF_INIT BIT0
#define INTF_NOTIFY BIT2

/* following is for BTC_ALGORITHM */
#define ALGO_BT_RSSI_STATE BIT0
#define ALGO_WIFI_RSSI_STATE BIT1
#define ALGO_BT_MONITOR BIT2
#define ALGO_TRACE BIT3
#define ALGO_TRACE_FW BIT4
#define ALGO_TRACE_FW_DETAIL BIT5
#define ALGO_TRACE_FW_EXEC BIT6
#define ALGO_TRACE_SW BIT7
#define ALGO_TRACE_SW_DETAIL BIT8
#define ALGO_TRACE_SW_EXEC BIT9

/* NOTE(review): a duplicate copy of the BT_COEX_ANT_TYPE_*, BTC_MIMO_PS_*,
 * BTC_RATE_* and BTC_ANT_PATH_* constants was removed here; the canonical
 * (identical-valued) definitions appear earlier in this header. */
115
/* Formatting helpers used by the coex debug/display code. */
#define CL_SPRINTF snprintf
#define CL_PRINTF printk

/* Print printstr when flag dbgflag is set for debug category dbgtype. */
#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
	do { \
		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
			printk(printstr, ##__VA_ARGS__); \
		} \
	} while (0)

/* Same as BTC_PRINT, but prefixes the calling function's name. */
#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \
	do { \
		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
			pr_info("%s: ", __func__); \
			printk(printstr, ##__VA_ARGS__); \
		} \
	} while (0)

/* Dump the first 6 bytes at _ptr, "-"-separated (MAC-address style).
 * NOTE(review): printstr must include its own parentheses, e.g.
 * ("title"), because it is expanded as "printk printstr;". */
#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \
	do { \
		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
			int __i; \
			u8 *__ptr = (u8 *)_ptr; \
			printk printstr; \
			for (__i = 0; __i < 6; __i++) \
				printk("%02X%s", __ptr[__i], (__i == 5) ? \
				       "" : "-"); \
			pr_info("\n"); \
		} \
	} while (0)

/* Hex-dump _hexdatalen bytes at _hexdata, 16 bytes per output line. */
#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
	do { \
		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
			int __i; \
			u8 *__ptr = (u8 *)_hexdata; \
			printk(_titlestring); \
			for (__i = 0; __i < (int)_hexdatalen; __i++) { \
				printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \
				       == 0) ? " " : " ");\
				if (((__i + 1) % 16) == 0) \
					printk("\n"); \
			} \
			pr_debug("\n"); \
		} \
	} while (0)
162
/* NOTE(review): a third, identical set of BTC_ANT_PATH_* definitions was
 * removed here; the canonical definitions appear near the top of this
 * header. */
166
/* Power-save state requested by the coex algorithm. */
enum btc_power_save_type {
	BTC_PS_WIFI_NATIVE = 0,
	BTC_PS_LPS_ON = 1,
	BTC_PS_LPS_OFF = 2,
	BTC_PS_LPS_MAX
};

/* Static per-board antenna/BT wiring information. */
struct btc_board_info {
	/* The following is some board information */
	u8 bt_chip_type;	/* one of enum BTC_CHIP_TYPE */
	u8 pg_ant_num;	/* pg ant number */
	u8 btdm_ant_num;	/* ant number for btdm */
	u8 btdm_ant_pos;
	bool bt_exist;	/* true when a BT device is present */
};

/* Opcodes for exhalbtc_dbg_control(). */
enum btc_dbg_opcode {
	BTC_DBG_SET_COEX_NORMAL = 0x0,
	BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
	BTC_DBG_SET_COEX_BT_ONLY = 0x2,
	BTC_DBG_MAX
};

/* RSSI classification; the STAY_* values presumably mean the level is
 * unchanged since the previous evaluation -- confirm in the algo code. */
enum btc_rssi_state {
	BTC_RSSI_STATE_HIGH = 0x0,
	BTC_RSSI_STATE_MEDIUM = 0x1,
	BTC_RSSI_STATE_LOW = 0x2,
	BTC_RSSI_STATE_STAY_HIGH = 0x3,
	BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
	BTC_RSSI_STATE_STAY_LOW = 0x5,
	BTC_RSSI_MAX
};

/* Operating role of the WiFi interface. */
enum btc_wifi_role {
	BTC_ROLE_STATION = 0x0,
	BTC_ROLE_AP = 0x1,
	BTC_ROLE_IBSS = 0x2,
	BTC_ROLE_HS_MODE = 0x3,
	BTC_ROLE_MAX
};

/* Current WiFi channel bandwidth. */
enum btc_wifi_bw_mode {
	BTC_WIFI_BW_LEGACY = 0x0,
	BTC_WIFI_BW_HT20 = 0x1,
	BTC_WIFI_BW_HT40 = 0x2,
	BTC_WIFI_BW_MAX
};

/* Dominant WiFi traffic direction. */
enum btc_wifi_traffic_dir {
	BTC_WIFI_TRAFFIC_TX = 0x0,
	BTC_WIFI_TRAFFIC_RX = 0x1,
	BTC_WIFI_TRAFFIC_MAX
};

/* PnP (suspend/resume) state passed to exhalbtc_pnp_notify(). */
enum btc_wifi_pnp {
	BTC_WIFI_PNP_WAKE_UP = 0x0,
	BTC_WIFI_PNP_SLEEP = 0x1,
	BTC_WIFI_PNP_MAX
};
226
227
/* Keys for the bfp_btc_get callback; the section comments below give the
 * expected type behind the out_buf pointer. */
enum btc_get_type {
	/* type bool */
	BTC_GET_BL_HS_OPERATION,
	BTC_GET_BL_HS_CONNECTING,
	BTC_GET_BL_WIFI_CONNECTED,
	BTC_GET_BL_WIFI_BUSY,
	BTC_GET_BL_WIFI_SCAN,
	BTC_GET_BL_WIFI_LINK,
	BTC_GET_BL_WIFI_DHCP,
	BTC_GET_BL_WIFI_SOFTAP_IDLE,
	BTC_GET_BL_WIFI_SOFTAP_LINKING,
	BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
	BTC_GET_BL_WIFI_ROAM,
	BTC_GET_BL_WIFI_4_WAY_PROGRESS,
	BTC_GET_BL_WIFI_UNDER_5G,
	BTC_GET_BL_WIFI_AP_MODE_ENABLE,
	BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
	BTC_GET_BL_WIFI_UNDER_B_MODE,
	BTC_GET_BL_EXT_SWITCH,

	/* type s4Byte */
	BTC_GET_S4_WIFI_RSSI,
	BTC_GET_S4_HS_RSSI,

	/* type u32 */
	BTC_GET_U4_WIFI_BW,
	BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
	BTC_GET_U4_WIFI_FW_VER,
	BTC_GET_U4_BT_PATCH_VER,

	/* type u1Byte */
	BTC_GET_U1_WIFI_DOT11_CHNL,
	BTC_GET_U1_WIFI_CENTRAL_CHNL,
	BTC_GET_U1_WIFI_HS_CHNL,
	BTC_GET_U1_MAC_PHY_MODE,

	/* for 1Ant */
	BTC_GET_U1_LPS_MODE,
	BTC_GET_BL_BT_SCO_BUSY,

	/* for test mode */
	BTC_GET_DRIVER_TEST_CFG,
	BTC_GET_MAX
};


/* Keys for the bfp_btc_set callback; the section comments below give the
 * expected type behind the in_buf pointer. */
enum btc_set_type {
	/* type bool */
	BTC_SET_BL_BT_DISABLE,
	BTC_SET_BL_BT_TRAFFIC_BUSY,
	BTC_SET_BL_BT_LIMITED_DIG,
	BTC_SET_BL_FORCE_TO_ROAM,
	BTC_SET_BL_TO_REJ_AP_AGG_PKT,
	BTC_SET_BL_BT_CTRL_AGG_SIZE,
	BTC_SET_BL_INC_SCAN_DEV_NUM,

	/* type u1Byte */
	BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
	BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
	BTC_SET_UI_SCAN_SIG_COMPENSATION,
	BTC_SET_U1_AGG_BUF_SIZE,

	/* type trigger some action */
	BTC_SET_ACT_GET_BT_RSSI,
	BTC_SET_ACT_AGGREGATE_CTRL,

	/********* for 1Ant **********/
	/* type bool */
	BTC_SET_BL_BT_SCO_BUSY,
	/* type u1Byte */
	BTC_SET_U1_1ANT_LPS,
	BTC_SET_U1_1ANT_RPWM,
	/* type trigger some action */
	BTC_SET_ACT_LEAVE_LPS,
	BTC_SET_ACT_ENTER_LPS,
	BTC_SET_ACT_NORMAL_LPS,
	BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
	BTC_SET_ACT_DISABLE_LOW_POWER,
	BTC_SET_ACT_UPDATE_ra_mask,
	BTC_SET_ACT_SEND_MIMO_PS,
	/* BT Coex related */
	BTC_SET_ACT_CTRL_BT_INFO,
	BTC_SET_ACT_CTRL_BT_COEX,
	/***************************/
	BTC_SET_MAX
};

/* Selector for the bfp_btc_disp_dbg_msg callback. */
enum btc_dbg_disp_type {
	BTC_DBG_DISP_COEX_STATISTICS = 0x0,
	BTC_DBG_DISP_BT_LINK_INFO = 0x1,
	BTC_DBG_DISP_BT_FW_VER = 0x2,
	BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
	BTC_DBG_DISP_MAX
};
322
/* Argument for exhalbtc_ips_notify(): inactive power-save transitions. */
enum btc_notify_type_ips {
	BTC_IPS_LEAVE = 0x0,
	BTC_IPS_ENTER = 0x1,
	BTC_IPS_MAX
};

/* Argument for exhalbtc_lps_notify(): leisure power-save transitions. */
enum btc_notify_type_lps {
	BTC_LPS_DISABLE = 0x0,
	BTC_LPS_ENABLE = 0x1,
	BTC_LPS_MAX
};

/* Argument for exhalbtc_scan_notify(). */
enum btc_notify_type_scan {
	BTC_SCAN_FINISH = 0x0,
	BTC_SCAN_START = 0x1,
	BTC_SCAN_MAX
};

/* Argument for exhalbtc_connect_notify(). */
enum btc_notify_type_associate {
	BTC_ASSOCIATE_FINISH = 0x0,
	BTC_ASSOCIATE_START = 0x1,
	BTC_ASSOCIATE_MAX
};

/* Media (link) status passed to exhalbtc_mediastatus_notify(). */
enum btc_notify_type_media_status {
	BTC_MEDIA_DISCONNECT = 0x0,
	BTC_MEDIA_CONNECT = 0x1,
	BTC_MEDIA_MAX
};

/* Special-packet classes for exhalbtc_special_packet_notify(). */
enum btc_notify_type_special_packet {
	BTC_PACKET_UNKNOWN = 0x0,
	BTC_PACKET_DHCP = 0x1,
	BTC_PACKET_ARP = 0x2,
	BTC_PACKET_EAPOL = 0x3,
	BTC_PACKET_MAX
};

/* BT stack operations for exhalbtc_stack_operation_notify(). */
enum btc_notify_type_stack_operation {
	BTC_STACK_OP_NONE = 0x0,
	BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
	BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
	BTC_STACK_OP_MAX
};
367
368
/* Register-access callbacks installed by the driver core; btc_context is
 * presumably the owning struct btc_coexist -- confirm against the
 * installer in halbtcoutsrc.c. */
typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);

typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);

typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);

typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);

typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
				   u32 bit_mask, u8 data1b);

typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);

typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);

typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
					  u8 bit_mask, u8 data);

/* Baseband (BB) register accessors with bit-mask support. */
typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
				   u32 bit_mask, u32 data);

typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
				  u32 bit_mask);

/* RF register accessors, additionally selecting an RF path. */
typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
				   u32 bit_mask, u32 data);

typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
				  u32 reg_addr, u32 bit_mask);

/* Send a host-to-card (H2C) firmware command. */
typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
				 u32 cmd_len, u8 *cmd_buffer);

/* Generic query/set callbacks keyed by btc_get_type / btc_set_type. */
typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);

typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);

/* Dump debug info selected by enum btc_dbg_disp_type. */
typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
407
/* BT status mirrored into the coexist context; some fields (e.g.
 * limited_dig) are also consumed by non-BT driver code. */
struct btc_bt_info {
	bool bt_disabled;
	u8 rssi_adjust_for_agc_table_on;
	u8 rssi_adjust_for_1ant_coex_type;
	bool bt_busy;
	u8 agg_buf_size;
	bool limited_dig;
	bool reject_agg_pkt;
	bool b_bt_ctrl_buf_size;
	bool increase_scan_dev_num;
	u16 bt_hci_ver;
	u16 bt_real_fw_ver;
	u8 bt_fw_ver;

	/* the following is for 1Ant solution */
	bool bt_ctrl_lps;
	bool bt_pwr_save_mode;
	bool bt_lps_on;
	bool force_to_roam;
	u8 force_exec_pwr_cmd_cnt;
	u8 lps_1ant;
	u8 rpwm_1ant;
	u32 ra_mask;
};

/* Profile/link information reported by the BT host stack. */
struct btc_stack_info {
	bool profile_notified;
	u16 hci_version; /* stack hci version */
	u8 num_of_link;
	bool bt_link_exist;
	bool sco_exist;
	bool acl_exist;
	bool a2dp_exist;
	bool hid_exist;
	u8 num_of_hid;
	bool pan_exist;
	bool unknown_acl_exist;
	char min_bt_rssi;
};

/* Invocation counters for the exhalbtc_* entry points. */
struct btc_statistics {
	u32 cnt_bind;
	u32 cnt_init_hw_config;
	u32 cnt_init_coex_dm;
	u32 cnt_ips_notify;
	u32 cnt_lps_notify;
	u32 cnt_scan_notify;
	u32 cnt_connect_notify;
	u32 cnt_media_status_notify;
	u32 cnt_special_packet_notify;
	u32 cnt_bt_info_notify;
	u32 cnt_periodical;
	u32 cnt_stack_operation_notify;
	u32 cnt_dbg_ctrl;
};

/* Which BT profiles are active on the current link. */
struct btc_bt_link_info {
	bool bt_link_exist;
	bool sco_exist;
	bool sco_only;
	bool a2dp_exist;
	bool a2dp_only;
	bool hid_exist;
	bool hid_only;
	bool pan_exist;
	bool pan_only;
};

/* Physical port the BT antenna is routed to. */
enum btc_antenna_pos {
	BTC_ANTENNA_AT_MAIN_PORT = 0x1,
	BTC_ANTENNA_AT_AUX_PORT = 0x2,
};
480
/* Central coexistence context; a single global instance (gl_bt_coexist)
 * is shared between the driver core and the per-chip coex algorithms. */
struct btc_coexist {
	/* make sure only one adapter can bind the data context */
	bool binded;
	/* default adapter */
	void *adapter;
	struct btc_board_info board_info;
	/* some bt info referenced by non-bt module */
	struct btc_bt_info bt_info;
	struct btc_stack_info stack_info;
	enum btc_chip_interface chip_interface;
	struct btc_bt_link_info bt_link_info;

	bool initilized;	/* NOTE(review): spelling kept; part of ABI */
	bool stop_coex_dm;
	bool manual_control;
	u8 *cli_buf;	/* buffer for debug/CLI output */
	struct btc_statistics statistics;
	u8 pwr_mode_val[10];

	/* function pointers - io related */
	bfp_btc_r1 btc_read_1byte;
	bfp_btc_w1 btc_write_1byte;
	bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
	bfp_btc_r2 btc_read_2byte;
	bfp_btc_w2 btc_write_2byte;
	bfp_btc_r4 btc_read_4byte;
	bfp_btc_w4 btc_write_4byte;

	/* baseband register access */
	bfp_btc_set_bb_reg btc_set_bb_reg;
	bfp_btc_get_bb_reg btc_get_bb_reg;


	/* RF register access */
	bfp_btc_set_rf_reg btc_set_rf_reg;
	bfp_btc_get_rf_reg btc_get_rf_reg;

	/* firmware H2C command */
	bfp_btc_fill_h2c btc_fill_h2c;

	/* debug display */
	bfp_btc_disp_dbg_msg btc_disp_dbg_msg;

	/* generic get/set, keyed by btc_get_type / btc_set_type */
	bfp_btc_get btc_get;
	bfp_btc_set btc_set;
};
523
/* Implemented in the coex core; true when WiFi traffic is mostly uplink. */
bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);

/* Single global coexist context; only one adapter may bind it. */
extern struct btc_coexist gl_bt_coexist;

/* Driver-facing entry points of the coexistence core.
 * NOTE(review): "initlize" is a typo kept for interface compatibility. */
bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
				 enum _RT_MEDIA_STATUS media_status);
void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
			     u8 length);
void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
void exhalbtc_periodical(struct btc_coexist *btcoexist);
void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
			  u8 *data);
void exhalbtc_stack_update_profile_info(void);
void exhalbtc_set_hci_version(u16 hci_version);
void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
void exhalbtc_update_min_bt_rssi(char bt_rssi);
void exhalbtc_set_bt_exist(bool bt_exist);
void exhalbtc_set_chip_type(u8 chip_type);
void exhalbtc_set_ant_num(u8 type, u8 ant_num);
void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
				  u8 *rssi_wifi, u8 *rssi_bt);
void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
558
559#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
new file mode 100644
index 000000000000..0ab94fe4cbbe
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
@@ -0,0 +1,218 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "rtl_btc.h"
28#include "halbt_precomp.h"
29
30#include <linux/vmalloc.h>
31#include <linux/module.h>
32
/* Callback table handed to the driver core via rtl_btc_get_ops_pointer(). */
static struct rtl_btc_ops rtl_btc_operation = {
	.btc_init_variables = rtl_btc_init_variables,
	.btc_init_hal_vars = rtl_btc_init_hal_vars,
	.btc_init_hw_config = rtl_btc_init_hw_config,
	.btc_ips_notify = rtl_btc_ips_notify,
	.btc_scan_notify = rtl_btc_scan_notify,
	.btc_connect_notify = rtl_btc_connect_notify,
	.btc_mediastatus_notify = rtl_btc_mediastatus_notify,
	.btc_periodical = rtl_btc_periodical,
	.btc_halt_notify = rtl_btc_halt_notify,
	.btc_btinfo_notify = rtl_btc_btinfo_notify,
	.btc_is_limited_dig = rtl_btc_is_limited_dig,
	.btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
	.btc_is_bt_disabled = rtl_btc_is_bt_disabled,
};
48
/* rtl_btc_ops: bind the global coexist context to this adapter. */
void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
{
	exhalbtc_initlize_variables(rtlpriv);
}
53
/* rtl_btc_ops: read antenna count, BT presence and BT chip type from the
 * hardware PG (EFUSE) info and push them into the coexist core. */
void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
{
	u8 ant_num;	/* antenna count from PG info (1 or 2) */
	u8 bt_exist;	/* non-zero when a BT device is present */
	u8 bt_type;	/* raw BT chip type from PG info */

	ant_num = rtl_get_hwpg_ant_num(rtlpriv);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "%s, antNum is %d\n", __func__, ant_num);

	bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "%s, bt_exist is %d\n", __func__, bt_exist);
	exhalbtc_set_bt_exist(bt_exist);

	bt_type = rtl_get_hwpg_bt_type(rtlpriv);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_type is %d\n",
		 __func__, bt_type);
	exhalbtc_set_chip_type(bt_type);

	exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
}
76
/* rtl_btc_ops: program coex hardware and algorithm initial state. */
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
{
	exhalbtc_init_hw_config(&gl_bt_coexist);
	exhalbtc_init_coex_dm(&gl_bt_coexist);
}
82
/* rtl_btc_ops: forward IPS enter/leave (enum btc_notify_type_ips). */
void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
{
	exhalbtc_ips_notify(&gl_bt_coexist, type);
}
87
/* rtl_btc_ops: forward scan start/finish (enum btc_notify_type_scan). */
void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
{
	exhalbtc_scan_notify(&gl_bt_coexist, scantype);
}
92
/* rtl_btc_ops: forward association start/finish events. */
void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
{
	exhalbtc_connect_notify(&gl_bt_coexist, action);
}
97
/* rtl_btc_ops: forward link connect/disconnect status. */
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
				enum _RT_MEDIA_STATUS mstatus)
{
	exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
}
103
/* rtl_btc_ops: periodic coexistence maintenance tick. */
void rtl_btc_periodical(struct rtl_priv *rtlpriv)
{
	exhalbtc_periodical(&gl_bt_coexist);
}
108
/* rtl_btc_ops: notify the coex core that the adapter is halting. */
void rtl_btc_halt_notify(void)
{
	exhalbtc_halt_notify(&gl_bt_coexist);
}
113
/* rtl_btc_ops: forward a raw BT-info buffer (from firmware C2H). */
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
{
	exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
}
118
/* rtl_btc_ops: true when coex wants DIG (dynamic init gain) limited. */
bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
{
	return gl_bt_coexist.bt_info.limited_dig;
}
123
124bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
125{
126 bool bt_change_edca = false;
127 u32 cur_edca_val;
128 u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
129 u32 edca_hs;
130 u32 edca_addr = 0x504;
131
132 cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
133 if (halbtc_is_wifi_uplink(rtlpriv)) {
134 if (cur_edca_val != edca_bt_hs_uplink) {
135 edca_hs = edca_bt_hs_uplink;
136 bt_change_edca = true;
137 }
138 } else {
139 if (cur_edca_val != edca_bt_hs_downlink) {
140 edca_hs = edca_bt_hs_downlink;
141 bt_change_edca = true;
142 }
143 }
144
145 if (bt_change_edca)
146 rtl_write_dword(rtlpriv, edca_addr, edca_hs);
147
148 return true;
149}
150
151bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
152{
153 if (gl_bt_coexist.bt_info.bt_disabled)
154 return true;
155 else
156 return false;
157}
158
/* Give the driver core access to the shared callback table above. */
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
{
	return &rtl_btc_operation;
}
EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
164
165u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
166{
167 u8 num;
168
169 if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
170 num = 2;
171 else
172 num = 1;
173
174 return num;
175}
176
177enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw)
178{
179 struct rtl_priv *rtlpriv = rtl_priv(hw);
180 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
181 enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT;
182
183 u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
184
185 if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
186 m_status = RT_MEDIA_CONNECT;
187
188 return m_status;
189}
190
/* Non-zero when the PG/EFUSE info says a BT device is present. */
u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
{
	return rtlpriv->btcoexist.btc_info.btcoexist;
}
195
/* Raw BT chip type from PG/EFUSE info (input to exhalbtc_set_chip_type). */
u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
{
	return rtlpriv->btcoexist.btc_info.bt_type;
}
200
MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
/* NOTE(review): this description matches the rtlwifi core module; confirm
 * whether the btcoexist module should carry its own description string. */
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");

/* Nothing to do at load time; the core binds via rtl_btc_get_ops_pointer(). */
static int __init rtl_btcoexist_module_init(void)
{
	return 0;
}

static void __exit rtl_btcoexist_module_exit(void)
{
	return;
}

module_init(rtl_btcoexist_module_init);
module_exit(rtl_btcoexist_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
new file mode 100644
index 000000000000..805b22cc8fc8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
@@ -0,0 +1,52 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 * Larry Finger <Larry.Finger@lwfinger.net>
22 *
23 *****************************************************************************/
24
25#ifndef __RTL_BTC_H__
26#define __RTL_BTC_H__
27
28#include "halbt_precomp.h"
29
/* rtl_btc_ops callback implementations (thin wrappers over exhalbtc_*). */
void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
				enum _RT_MEDIA_STATUS mstatus);
void rtl_btc_periodical(struct rtl_priv *rtlpriv);
void rtl_btc_halt_notify(void);
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);

/* Returns the shared callback table; exported for the core driver. */
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);

/* Helpers reading hardware PG (EFUSE) information. */
u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
/* Link status derived from opmode and mac80211 link state. */
enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw);
51
52#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 2d337a0c3df0..4ec424f26672 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -36,6 +36,66 @@
36 36
37#include <linux/export.h> 37#include <linux/export.h>
38 38
39void rtl_addr_delay(u32 addr)
40{
41 if (addr == 0xfe)
42 mdelay(50);
43 else if (addr == 0xfd)
44 mdelay(5);
45 else if (addr == 0xfc)
46 mdelay(1);
47 else if (addr == 0xfb)
48 udelay(50);
49 else if (addr == 0xfa)
50 udelay(5);
51 else if (addr == 0xf9)
52 udelay(1);
53}
54EXPORT_SYMBOL(rtl_addr_delay);
55
56void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
57 u32 mask, u32 data)
58{
59 if (addr == 0xfe) {
60 mdelay(50);
61 } else if (addr == 0xfd) {
62 mdelay(5);
63 } else if (addr == 0xfc) {
64 mdelay(1);
65 } else if (addr == 0xfb) {
66 udelay(50);
67 } else if (addr == 0xfa) {
68 udelay(5);
69 } else if (addr == 0xf9) {
70 udelay(1);
71 } else {
72 rtl_set_rfreg(hw, rfpath, addr, mask, data);
73 udelay(1);
74 }
75}
76EXPORT_SYMBOL(rtl_rfreg_delay);
77
78void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
79{
80 if (addr == 0xfe) {
81 mdelay(50);
82 } else if (addr == 0xfd) {
83 mdelay(5);
84 } else if (addr == 0xfc) {
85 mdelay(1);
86 } else if (addr == 0xfb) {
87 udelay(50);
88 } else if (addr == 0xfa) {
89 udelay(5);
90 } else if (addr == 0xf9) {
91 udelay(1);
92 } else {
93 rtl_set_bbreg(hw, addr, MASKDWORD, data);
94 udelay(1);
95 }
96}
97EXPORT_SYMBOL(rtl_bb_delay);
98
39void rtl_fw_cb(const struct firmware *firmware, void *context) 99void rtl_fw_cb(const struct firmware *firmware, void *context)
40{ 100{
41 struct ieee80211_hw *hw = context; 101 struct ieee80211_hw *hw = context;
@@ -475,20 +535,40 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
475{ 535{
476 struct rtl_priv *rtlpriv = rtl_priv(hw); 536 struct rtl_priv *rtlpriv = rtl_priv(hw);
477 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 537 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
538 u32 rx_conf;
478 539
479 *new_flags &= RTL_SUPPORTED_FILTERS; 540 *new_flags &= RTL_SUPPORTED_FILTERS;
480 if (!changed_flags) 541 if (!changed_flags)
481 return; 542 return;
482 543
544 /* if ssid not set to hw don't check bssid
545 * here just used for linked scanning, & linked
546 * and nolink check bssid is set in set network_type */
547 if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
548 (mac->link_state >= MAC80211_LINKED)) {
549 if (mac->opmode != NL80211_IFTYPE_AP &&
550 mac->opmode != NL80211_IFTYPE_MESH_POINT) {
551 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
552 rtlpriv->cfg->ops->set_chk_bssid(hw, false);
553 } else {
554 rtlpriv->cfg->ops->set_chk_bssid(hw, true);
555 }
556 }
557 }
558
559 /* must be called after set_chk_bssid since that function modifies the
560 * RCR register too. */
561 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
562
483 /*TODO: we disable broadcase now, so enable here */ 563 /*TODO: we disable broadcase now, so enable here */
484 if (changed_flags & FIF_ALLMULTI) { 564 if (changed_flags & FIF_ALLMULTI) {
485 if (*new_flags & FIF_ALLMULTI) { 565 if (*new_flags & FIF_ALLMULTI) {
486 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | 566 rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
487 rtlpriv->cfg->maps[MAC_RCR_AB]; 567 rtlpriv->cfg->maps[MAC_RCR_AB];
488 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 568 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
489 "Enable receive multicast frame\n"); 569 "Enable receive multicast frame\n");
490 } else { 570 } else {
491 mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] | 571 rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
492 rtlpriv->cfg->maps[MAC_RCR_AB]); 572 rtlpriv->cfg->maps[MAC_RCR_AB]);
493 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 573 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
494 "Disable receive multicast frame\n"); 574 "Disable receive multicast frame\n");
@@ -497,39 +577,25 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
497 577
498 if (changed_flags & FIF_FCSFAIL) { 578 if (changed_flags & FIF_FCSFAIL) {
499 if (*new_flags & FIF_FCSFAIL) { 579 if (*new_flags & FIF_FCSFAIL) {
500 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32]; 580 rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
501 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 581 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
502 "Enable receive FCS error frame\n"); 582 "Enable receive FCS error frame\n");
503 } else { 583 } else {
504 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32]; 584 rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
505 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 585 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
506 "Disable receive FCS error frame\n"); 586 "Disable receive FCS error frame\n");
507 } 587 }
508 } 588 }
509 589
510 /* if ssid not set to hw don't check bssid
511 * here just used for linked scanning, & linked
512 * and nolink check bssid is set in set network_type */
513 if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
514 (mac->link_state >= MAC80211_LINKED)) {
515 if (mac->opmode != NL80211_IFTYPE_AP &&
516 mac->opmode != NL80211_IFTYPE_MESH_POINT) {
517 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
518 rtlpriv->cfg->ops->set_chk_bssid(hw, false);
519 } else {
520 rtlpriv->cfg->ops->set_chk_bssid(hw, true);
521 }
522 }
523 }
524 590
525 if (changed_flags & FIF_CONTROL) { 591 if (changed_flags & FIF_CONTROL) {
526 if (*new_flags & FIF_CONTROL) { 592 if (*new_flags & FIF_CONTROL) {
527 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF]; 593 rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
528 594
529 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 595 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
530 "Enable receive control frame\n"); 596 "Enable receive control frame\n");
531 } else { 597 } else {
532 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF]; 598 rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
533 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 599 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
534 "Disable receive control frame\n"); 600 "Disable receive control frame\n");
535 } 601 }
@@ -537,15 +603,17 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
537 603
538 if (changed_flags & FIF_OTHER_BSS) { 604 if (changed_flags & FIF_OTHER_BSS) {
539 if (*new_flags & FIF_OTHER_BSS) { 605 if (*new_flags & FIF_OTHER_BSS) {
540 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP]; 606 rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
541 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 607 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
542 "Enable receive other BSS's frame\n"); 608 "Enable receive other BSS's frame\n");
543 } else { 609 } else {
544 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP]; 610 rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
545 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 611 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
546 "Disable receive other BSS's frame\n"); 612 "Disable receive other BSS's frame\n");
547 } 613 }
548 } 614 }
615
616 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
549} 617}
550static int rtl_op_sta_add(struct ieee80211_hw *hw, 618static int rtl_op_sta_add(struct ieee80211_hw *hw,
551 struct ieee80211_vif *vif, 619 struct ieee80211_vif *vif,
@@ -738,6 +806,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
738 rtlpriv->cfg->ops->linked_set_reg(hw); 806 rtlpriv->cfg->ops->linked_set_reg(hw);
739 rcu_read_lock(); 807 rcu_read_lock();
740 sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid); 808 sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
809 if (!sta) {
810 pr_err("ieee80211_find_sta returned NULL\n");
811 rcu_read_unlock();
812 goto out;
813 }
741 814
742 if (vif->type == NL80211_IFTYPE_STATION && sta) 815 if (vif->type == NL80211_IFTYPE_STATION && sta)
743 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); 816 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
@@ -892,7 +965,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
892 965
893 mac->basic_rates = basic_rates; 966 mac->basic_rates = basic_rates;
894 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, 967 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
895 (u8 *) (&basic_rates)); 968 (u8 *)(&basic_rates));
896 } 969 }
897 rcu_read_unlock(); 970 rcu_read_unlock();
898 } 971 }
@@ -906,6 +979,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
906 if (bss_conf->assoc) { 979 if (bss_conf->assoc) {
907 if (ppsc->fwctrl_lps) { 980 if (ppsc->fwctrl_lps) {
908 u8 mstatus = RT_MEDIA_CONNECT; 981 u8 mstatus = RT_MEDIA_CONNECT;
982 u8 keep_alive = 10;
983 rtlpriv->cfg->ops->set_hw_reg(hw,
984 HW_VAR_KEEP_ALIVE,
985 &keep_alive);
986
909 rtlpriv->cfg->ops->set_hw_reg(hw, 987 rtlpriv->cfg->ops->set_hw_reg(hw,
910 HW_VAR_H2C_FW_JOINBSSRPT, 988 HW_VAR_H2C_FW_JOINBSSRPT,
911 &mstatus); 989 &mstatus);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 2fe46a1b4f1f..027e75374dcc 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -41,5 +41,9 @@
41 41
42extern const struct ieee80211_ops rtl_ops; 42extern const struct ieee80211_ops rtl_ops;
43void rtl_fw_cb(const struct firmware *firmware, void *context); 43void rtl_fw_cb(const struct firmware *firmware, void *context);
44void rtl_addr_delay(u32 addr);
45void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
46 u32 mask, u32 data);
47void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
44 48
45#endif 49#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index d7aa165fe677..dae55257f0e8 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -811,19 +811,19 @@ done:
811 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) 811 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
812 return; 812 return;
813 tmp_one = 1; 813 tmp_one = 1;
814 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false, 814 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
815 HW_DESC_RXBUFF_ADDR, 815 HW_DESC_RXBUFF_ADDR,
816 (u8 *)&bufferaddress); 816 (u8 *)&bufferaddress);
817 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, 817 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
818 HW_DESC_RXPKT_LEN, 818 HW_DESC_RXPKT_LEN,
819 (u8 *)&rtlpci->rxbuffersize); 819 (u8 *)&rtlpci->rxbuffersize);
820 820
821 if (index == rtlpci->rxringcount - 1) 821 if (index == rtlpci->rxringcount - 1)
822 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, 822 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
823 HW_DESC_RXERO, 823 HW_DESC_RXERO,
824 &tmp_one); 824 &tmp_one);
825 825
826 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN, 826 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXOWN,
827 &tmp_one); 827 &tmp_one);
828 828
829 index = (index + 1) % rtlpci->rxringcount; 829 index = (index + 1) % rtlpci->rxringcount;
@@ -983,6 +983,8 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
983 struct sk_buff *pskb = NULL; 983 struct sk_buff *pskb = NULL;
984 struct rtl_tx_desc *pdesc = NULL; 984 struct rtl_tx_desc *pdesc = NULL;
985 struct rtl_tcb_desc tcb_desc; 985 struct rtl_tcb_desc tcb_desc;
986 /*This is for new trx flow*/
987 struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
986 u8 temp_one = 1; 988 u8 temp_one = 1;
987 989
988 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 990 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
@@ -1004,11 +1006,12 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1004 info = IEEE80211_SKB_CB(pskb); 1006 info = IEEE80211_SKB_CB(pskb);
1005 pdesc = &ring->desc[0]; 1007 pdesc = &ring->desc[0];
1006 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 1008 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
1007 info, NULL, pskb, BEACON_QUEUE, &tcb_desc); 1009 (u8 *)pbuffer_desc, info, NULL, pskb,
1010 BEACON_QUEUE, &tcb_desc);
1008 1011
1009 __skb_queue_tail(&ring->queue, pskb); 1012 __skb_queue_tail(&ring->queue, pskb);
1010 1013
1011 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN, 1014 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
1012 &temp_one); 1015 &temp_one);
1013 1016
1014 return; 1017 return;
@@ -1066,7 +1069,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1066 mac->current_ampdu_factor = 3; 1069 mac->current_ampdu_factor = 3;
1067 1070
1068 /*QOS*/ 1071 /*QOS*/
1069 rtlpci->acm_method = eAcmWay2_SW; 1072 rtlpci->acm_method = EACMWAY2_SW;
1070 1073
1071 /*task */ 1074 /*task */
1072 tasklet_init(&rtlpriv->works.irq_tasklet, 1075 tasklet_init(&rtlpriv->works.irq_tasklet,
@@ -1113,7 +1116,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1113 ((i + 1) % entries) * 1116 ((i + 1) % entries) *
1114 sizeof(*ring); 1117 sizeof(*ring);
1115 1118
1116 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]), 1119 rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(ring[i]),
1117 true, HW_DESC_TX_NEXTDESC_ADDR, 1120 true, HW_DESC_TX_NEXTDESC_ADDR,
1118 (u8 *)&nextdescaddress); 1121 (u8 *)&nextdescaddress);
1119 } 1122 }
@@ -1188,19 +1191,19 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1188 dev_kfree_skb_any(skb); 1191 dev_kfree_skb_any(skb);
1189 return 1; 1192 return 1;
1190 } 1193 }
1191 rtlpriv->cfg->ops->set_desc((u8 *)entry, false, 1194 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1192 HW_DESC_RXBUFF_ADDR, 1195 HW_DESC_RXBUFF_ADDR,
1193 (u8 *)&bufferaddress); 1196 (u8 *)&bufferaddress);
1194 rtlpriv->cfg->ops->set_desc((u8 *)entry, false, 1197 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1195 HW_DESC_RXPKT_LEN, 1198 HW_DESC_RXPKT_LEN,
1196 (u8 *)&rtlpci-> 1199 (u8 *)&rtlpci->
1197 rxbuffersize); 1200 rxbuffersize);
1198 rtlpriv->cfg->ops->set_desc((u8 *) entry, false, 1201 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1199 HW_DESC_RXOWN, 1202 HW_DESC_RXOWN,
1200 &tmp_one); 1203 &tmp_one);
1201 } 1204 }
1202 1205
1203 rtlpriv->cfg->ops->set_desc((u8 *) entry, false, 1206 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1204 HW_DESC_RXERO, &tmp_one); 1207 HW_DESC_RXERO, &tmp_one);
1205 } 1208 }
1206 return 0; 1209 return 0;
@@ -1331,7 +1334,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1331 1334
1332 for (i = 0; i < rtlpci->rxringcount; i++) { 1335 for (i = 0; i < rtlpci->rxringcount; i++) {
1333 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; 1336 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1334 rtlpriv->cfg->ops->set_desc((u8 *) entry, 1337 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
1335 false, 1338 false,
1336 HW_DESC_RXOWN, 1339 HW_DESC_RXOWN,
1337 &tmp_one); 1340 &tmp_one);
@@ -1424,6 +1427,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
1424 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1427 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1425 struct rtl8192_tx_ring *ring; 1428 struct rtl8192_tx_ring *ring;
1426 struct rtl_tx_desc *pdesc; 1429 struct rtl_tx_desc *pdesc;
1430 struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
1427 u8 idx; 1431 u8 idx;
1428 u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb); 1432 u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1429 unsigned long flags; 1433 unsigned long flags;
@@ -1464,17 +1468,22 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
1464 idx = 0; 1468 idx = 0;
1465 1469
1466 pdesc = &ring->desc[idx]; 1470 pdesc = &ring->desc[idx];
1467 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, 1471 if (rtlpriv->use_new_trx_flow) {
1468 true, HW_DESC_OWN); 1472 ptx_bd_desc = &ring->buffer_desc[idx];
1473 } else {
1474 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
1475 true, HW_DESC_OWN);
1469 1476
1470 if ((own == 1) && (hw_queue != BEACON_QUEUE)) { 1477 if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1471 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 1478 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1472 "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n", 1479 "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
1473 hw_queue, ring->idx, idx, 1480 hw_queue, ring->idx, idx,
1474 skb_queue_len(&ring->queue)); 1481 skb_queue_len(&ring->queue));
1475 1482
1476 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); 1483 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1477 return skb->len; 1484 flags);
1485 return skb->len;
1486 }
1478 } 1487 }
1479 1488
1480 if (ieee80211_is_data_qos(fc)) { 1489 if (ieee80211_is_data_qos(fc)) {
@@ -1494,17 +1503,20 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
1494 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 1503 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1495 1504
1496 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, 1505 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1497 info, sta, skb, hw_queue, ptcb_desc); 1506 (u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
1498 1507
1499 __skb_queue_tail(&ring->queue, skb); 1508 __skb_queue_tail(&ring->queue, skb);
1500 1509
1501 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true, 1510 if (rtlpriv->use_new_trx_flow) {
1502 HW_DESC_OWN, &temp_one); 1511 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1503 1512 HW_DESC_OWN, &hw_queue);
1513 } else {
1514 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1515 HW_DESC_OWN, &temp_one);
1516 }
1504 1517
1505 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && 1518 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1506 hw_queue != BEACON_QUEUE) { 1519 hw_queue != BEACON_QUEUE) {
1507
1508 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, 1520 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
1509 "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n", 1521 "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
1510 hw_queue, ring->idx, idx, 1522 hw_queue, ring->idx, idx,
@@ -1841,6 +1853,65 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1841 return true; 1853 return true;
1842} 1854}
1843 1855
1856static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
1857{
1858 struct rtl_priv *rtlpriv = rtl_priv(hw);
1859 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1860 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1861 int ret;
1862
1863 ret = pci_enable_msi(rtlpci->pdev);
1864 if (ret < 0)
1865 return ret;
1866
1867 ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1868 IRQF_SHARED, KBUILD_MODNAME, hw);
1869 if (ret < 0) {
1870 pci_disable_msi(rtlpci->pdev);
1871 return ret;
1872 }
1873
1874 rtlpci->using_msi = true;
1875
1876 RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
1877 "MSI Interrupt Mode!\n");
1878 return 0;
1879}
1880
1881static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
1882{
1883 struct rtl_priv *rtlpriv = rtl_priv(hw);
1884 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1885 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1886 int ret;
1887
1888 ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1889 IRQF_SHARED, KBUILD_MODNAME, hw);
1890 if (ret < 0)
1891 return ret;
1892
1893 rtlpci->using_msi = false;
1894 RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
1895 "Pin-based Interrupt Mode!\n");
1896 return 0;
1897}
1898
1899static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
1900{
1901 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1902 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1903 int ret;
1904
1905 if (rtlpci->msi_support) {
1906 ret = rtl_pci_intr_mode_msi(hw);
1907 if (ret < 0)
1908 ret = rtl_pci_intr_mode_legacy(hw);
1909 } else {
1910 ret = rtl_pci_intr_mode_legacy(hw);
1911 }
1912 return ret;
1913}
1914
1844int rtl_pci_probe(struct pci_dev *pdev, 1915int rtl_pci_probe(struct pci_dev *pdev,
1845 const struct pci_device_id *id) 1916 const struct pci_device_id *id)
1846{ 1917{
@@ -1983,8 +2054,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
1983 } 2054 }
1984 2055
1985 rtlpci = rtl_pcidev(pcipriv); 2056 rtlpci = rtl_pcidev(pcipriv);
1986 err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, 2057 err = rtl_pci_intr_mode_decide(hw);
1987 IRQF_SHARED, KBUILD_MODNAME, hw);
1988 if (err) { 2058 if (err) {
1989 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 2059 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1990 "%s: failed to register IRQ handler\n", 2060 "%s: failed to register IRQ handler\n",
@@ -2052,6 +2122,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
2052 rtlpci->irq_alloc = 0; 2122 rtlpci->irq_alloc = 0;
2053 } 2123 }
2054 2124
2125 if (rtlpci->using_msi)
2126 pci_disable_msi(rtlpci->pdev);
2127
2055 list_del(&rtlpriv->list); 2128 list_del(&rtlpriv->list);
2056 if (rtlpriv->io.pci_mem_start != 0) { 2129 if (rtlpriv->io.pci_mem_start != 0) {
2057 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); 2130 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d3262ec45d23..90174a814a6d 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -137,12 +137,22 @@ struct rtl_tx_cmd_desc {
137 u32 dword[16]; 137 u32 dword[16];
138} __packed; 138} __packed;
139 139
140/* In new TRX flow, Buffer_desc is new concept
141 * But TX wifi info == TX descriptor in old flow
142 * RX wifi info == RX descriptor in old flow
143 */
144struct rtl_tx_buffer_desc {
145 u32 dword[8]; /*seg = 4*/
146} __packed;
147
140struct rtl8192_tx_ring { 148struct rtl8192_tx_ring {
141 struct rtl_tx_desc *desc; 149 struct rtl_tx_desc *desc;
142 dma_addr_t dma; 150 dma_addr_t dma;
143 unsigned int idx; 151 unsigned int idx;
144 unsigned int entries; 152 unsigned int entries;
145 struct sk_buff_head queue; 153 struct sk_buff_head queue;
154 /*add for new trx flow*/
155 struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
146}; 156};
147 157
148struct rtl8192_rx_ring { 158struct rtl8192_rx_ring {
@@ -199,6 +209,10 @@ struct rtl_pci {
199 209
200 u16 shortretry_limit; 210 u16 shortretry_limit;
201 u16 longretry_limit; 211 u16 longretry_limit;
212
213 /* MSI support */
214 bool msi_support;
215 bool using_msi;
202}; 216};
203 217
204struct mp_adapter { 218struct mp_adapter {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index d1c0191a195b..50504942ded1 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -32,6 +32,106 @@
32#include "base.h" 32#include "base.h"
33#include "ps.h" 33#include "ps.h"
34 34
35/* Description:
36 * This routine deals with the Power Configuration CMD
37 * parsing for RTL8723/RTL8188E Series IC.
38 * Assumption:
39 * We should follow specific format that was released from HW SD.
40 */
41bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
42 u8 faversion, u8 interface_type,
43 struct wlan_pwr_cfg pwrcfgcmd[])
44{
45 struct wlan_pwr_cfg cfg_cmd = {0};
46 bool polling_bit = false;
47 u32 ary_idx = 0;
48 u8 value = 0;
49 u32 offset = 0;
50 u32 polling_count = 0;
51 u32 max_polling_cnt = 5000;
52
53 do {
54 cfg_cmd = pwrcfgcmd[ary_idx];
55 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
56 "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x),"
57 "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
58 GET_PWR_CFG_OFFSET(cfg_cmd),
59 GET_PWR_CFG_CUT_MASK(cfg_cmd),
60 GET_PWR_CFG_FAB_MASK(cfg_cmd),
61 GET_PWR_CFG_INTF_MASK(cfg_cmd),
62 GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
63 GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
64
65 if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
66 (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
67 (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
68 switch (GET_PWR_CFG_CMD(cfg_cmd)) {
69 case PWR_CMD_READ:
70 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
71 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
72 break;
73 case PWR_CMD_WRITE:
74 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
75 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
76 offset = GET_PWR_CFG_OFFSET(cfg_cmd);
77
78 /*Read the value from system register*/
79 value = rtl_read_byte(rtlpriv, offset);
80 value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
81 value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
82 GET_PWR_CFG_MASK(cfg_cmd));
83
84 /*Write the value back to sytem register*/
85 rtl_write_byte(rtlpriv, offset, value);
86 break;
87 case PWR_CMD_POLLING:
88 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
89 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
90 polling_bit = false;
91 offset = GET_PWR_CFG_OFFSET(cfg_cmd);
92
93 do {
94 value = rtl_read_byte(rtlpriv, offset);
95
96 value &= GET_PWR_CFG_MASK(cfg_cmd);
97 if (value ==
98 (GET_PWR_CFG_VALUE(cfg_cmd)
99 & GET_PWR_CFG_MASK(cfg_cmd)))
100 polling_bit = true;
101 else
102 udelay(10);
103
104 if (polling_count++ > max_polling_cnt)
105 return false;
106 } while (!polling_bit);
107 break;
108 case PWR_CMD_DELAY:
109 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
110 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
111 if (GET_PWR_CFG_VALUE(cfg_cmd) ==
112 PWRSEQ_DELAY_US)
113 udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
114 else
115 mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
116 break;
117 case PWR_CMD_END:
118 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
119 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
120 return true;
121 default:
122 RT_ASSERT(false,
123 "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
124 break;
125 }
126
127 }
128 ary_idx++;
129 } while (1);
130
131 return true;
132}
133EXPORT_SYMBOL(rtl_hal_pwrseqcmdparsing);
134
35bool rtl_ps_enable_nic(struct ieee80211_hw *hw) 135bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
36{ 136{
37 struct rtl_priv *rtlpriv = rtl_priv(hw); 137 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -659,7 +759,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
659 unsigned int len) 759 unsigned int len)
660{ 760{
661 struct rtl_priv *rtlpriv = rtl_priv(hw); 761 struct rtl_priv *rtlpriv = rtl_priv(hw);
662 struct ieee80211_mgmt *mgmt = (void *)data; 762 struct ieee80211_mgmt *mgmt = data;
663 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); 763 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
664 u8 *pos, *end, *ie; 764 u8 *pos, *end, *ie;
665 u16 noa_len; 765 u16 noa_len;
@@ -758,7 +858,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
758 unsigned int len) 858 unsigned int len)
759{ 859{
760 struct rtl_priv *rtlpriv = rtl_priv(hw); 860 struct rtl_priv *rtlpriv = rtl_priv(hw);
761 struct ieee80211_mgmt *mgmt = (void *)data; 861 struct ieee80211_mgmt *mgmt = data;
762 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); 862 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
763 u8 noa_num, index, i, noa_index = 0; 863 u8 noa_num, index, i, noa_index = 0;
764 u8 *pos, *end, *ie; 864 u8 *pos, *end, *ie;
@@ -850,9 +950,8 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
850 switch (p2p_ps_state) { 950 switch (p2p_ps_state) {
851 case P2P_PS_DISABLE: 951 case P2P_PS_DISABLE:
852 p2pinfo->p2p_ps_state = p2p_ps_state; 952 p2pinfo->p2p_ps_state = p2p_ps_state;
853 rtlpriv->cfg->ops->set_hw_reg(hw, 953 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
854 HW_VAR_H2C_FW_P2P_PS_OFFLOAD, 954 &p2p_ps_state);
855 (u8 *)(&p2p_ps_state));
856 955
857 p2pinfo->noa_index = 0; 956 p2pinfo->noa_index = 0;
858 p2pinfo->ctwindow = 0; 957 p2pinfo->ctwindow = 0;
@@ -864,7 +963,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
864 rtlps->smart_ps = 2; 963 rtlps->smart_ps = 2;
865 rtlpriv->cfg->ops->set_hw_reg(hw, 964 rtlpriv->cfg->ops->set_hw_reg(hw,
866 HW_VAR_H2C_FW_PWRMODE, 965 HW_VAR_H2C_FW_PWRMODE,
867 (u8 *)(&rtlps->pwr_mode)); 966 &rtlps->pwr_mode);
868 } 967 }
869 } 968 }
870 break; 969 break;
@@ -877,12 +976,12 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
877 rtlps->smart_ps = 0; 976 rtlps->smart_ps = 0;
878 rtlpriv->cfg->ops->set_hw_reg(hw, 977 rtlpriv->cfg->ops->set_hw_reg(hw,
879 HW_VAR_H2C_FW_PWRMODE, 978 HW_VAR_H2C_FW_PWRMODE,
880 (u8 *)(&rtlps->pwr_mode)); 979 &rtlps->pwr_mode);
881 } 980 }
882 } 981 }
883 rtlpriv->cfg->ops->set_hw_reg(hw, 982 rtlpriv->cfg->ops->set_hw_reg(hw,
884 HW_VAR_H2C_FW_P2P_PS_OFFLOAD, 983 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
885 (u8 *)(&p2p_ps_state)); 984 &p2p_ps_state);
886 } 985 }
887 break; 986 break;
888 case P2P_PS_SCAN: 987 case P2P_PS_SCAN:
@@ -892,7 +991,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
892 p2pinfo->p2p_ps_state = p2p_ps_state; 991 p2pinfo->p2p_ps_state = p2p_ps_state;
893 rtlpriv->cfg->ops->set_hw_reg(hw, 992 rtlpriv->cfg->ops->set_hw_reg(hw,
894 HW_VAR_H2C_FW_P2P_PS_OFFLOAD, 993 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
895 (u8 *)(&p2p_ps_state)); 994 &p2p_ps_state);
896 } 995 }
897 break; 996 break;
898 default: 997 default:
@@ -912,7 +1011,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
912{ 1011{
913 struct rtl_priv *rtlpriv = rtl_priv(hw); 1012 struct rtl_priv *rtlpriv = rtl_priv(hw);
914 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1013 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
915 struct ieee80211_hdr *hdr = (void *)data; 1014 struct ieee80211_hdr *hdr = data;
916 1015
917 if (!mac->p2p) 1016 if (!mac->p2p)
918 return; 1017 return;
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 88bd76ea88f7..3bd41f958974 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -32,6 +32,66 @@
32 32
33#define MAX_SW_LPS_SLEEP_INTV 5 33#define MAX_SW_LPS_SLEEP_INTV 5
34 34
35/*---------------------------------------------
36 * 3 The value of cmd: 4 bits
37 *---------------------------------------------
38 */
39#define PWR_CMD_READ 0x00
40#define PWR_CMD_WRITE 0x01
41#define PWR_CMD_POLLING 0x02
42#define PWR_CMD_DELAY 0x03
43#define PWR_CMD_END 0x04
44
45/* define the base address of each block */
46#define PWR_BASEADDR_MAC 0x00
47#define PWR_BASEADDR_USB 0x01
48#define PWR_BASEADDR_PCIE 0x02
49#define PWR_BASEADDR_SDIO 0x03
50
51#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
52#define PWR_CUT_TESTCHIP_MSK BIT(0)
53#define PWR_CUT_A_MSK BIT(1)
54#define PWR_CUT_B_MSK BIT(2)
55#define PWR_CUT_C_MSK BIT(3)
56#define PWR_CUT_D_MSK BIT(4)
57#define PWR_CUT_E_MSK BIT(5)
58#define PWR_CUT_F_MSK BIT(6)
59#define PWR_CUT_G_MSK BIT(7)
60#define PWR_CUT_ALL_MSK 0xFF
61#define PWR_INTF_SDIO_MSK BIT(0)
62#define PWR_INTF_USB_MSK BIT(1)
63#define PWR_INTF_PCI_MSK BIT(2)
64#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
65
66enum pwrseq_delay_unit {
67 PWRSEQ_DELAY_US,
68 PWRSEQ_DELAY_MS,
69};
70
71struct wlan_pwr_cfg {
72 u16 offset;
73 u8 cut_msk;
74 u8 fab_msk:4;
75 u8 interface_msk:4;
76 u8 base:4;
77 u8 cmd:4;
78 u8 msk;
79 u8 value;
80};
81
82#define GET_PWR_CFG_OFFSET(__PWR_CMD) (__PWR_CMD.offset)
83#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) (__PWR_CMD.cut_msk)
84#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) (__PWR_CMD.fab_msk)
85#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) (__PWR_CMD.interface_msk)
86#define GET_PWR_CFG_BASE(__PWR_CMD) (__PWR_CMD.base)
87#define GET_PWR_CFG_CMD(__PWR_CMD) (__PWR_CMD.cmd)
88#define GET_PWR_CFG_MASK(__PWR_CMD) (__PWR_CMD.msk)
89#define GET_PWR_CFG_VALUE(__PWR_CMD) (__PWR_CMD.value)
90
91bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
92 u8 fab_version, u8 interface_type,
93 struct wlan_pwr_cfg pwrcfgcmd[]);
94
35bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, 95bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
36 enum rf_pwrstate state_toset, u32 changesource); 96 enum rf_pwrstate state_toset, u32 changesource);
37bool rtl_ps_enable_nic(struct ieee80211_hw *hw); 97bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index a98acefb8c06..ee28a1a3d010 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -260,8 +260,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
260 kfree(rate_priv); 260 kfree(rate_priv);
261} 261}
262 262
263static struct rate_control_ops rtl_rate_ops = { 263static const struct rate_control_ops rtl_rate_ops = {
264 .module = NULL,
265 .name = "rtl_rc", 264 .name = "rtl_rc",
266 .alloc = rtl_rate_alloc, 265 .alloc = rtl_rate_alloc,
267 .free = rtl_rate_free, 266 .free = rtl_rate_free,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
index 5b194e97f4b3..a85419a37651 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
@@ -5,7 +5,6 @@ rtl8188ee-objs := \
5 led.o \ 5 led.o \
6 phy.o \ 6 phy.o \
7 pwrseq.o \ 7 pwrseq.o \
8 pwrseqcmd.o \
9 rf.o \ 8 rf.o \
10 sw.o \ 9 sw.o \
11 table.o \ 10 table.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index a6184b6e1d57..f8daa61cf1c3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -235,7 +235,7 @@ void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw,
235 u8 pwr_val = 0; 235 u8 pwr_val = 0;
236 u8 cck_base = rtldm->swing_idx_cck_base; 236 u8 cck_base = rtldm->swing_idx_cck_base;
237 u8 cck_val = rtldm->swing_idx_cck; 237 u8 cck_val = rtldm->swing_idx_cck;
238 u8 ofdm_base = rtldm->swing_idx_ofdm_base; 238 u8 ofdm_base = rtldm->swing_idx_ofdm_base[0];
239 u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A]; 239 u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
240 240
241 if (type == 0) { 241 if (type == 0) {
@@ -726,7 +726,7 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
726 static u64 last_rx; 726 static u64 last_rx;
727 long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; 727 long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
728 728
729 if (rtlhal->oem_id == RT_CID_819x_HP) { 729 if (rtlhal->oem_id == RT_CID_819X_HP) {
730 u64 cur_txok_cnt = 0; 730 u64 cur_txok_cnt = 0;
731 u64 cur_rxok_cnt = 0; 731 u64 cur_rxok_cnt = 0;
732 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok; 732 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok;
@@ -851,9 +851,8 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
851 } else { 851 } else {
852 if (rtlpriv->dm.current_turbo_edca) { 852 if (rtlpriv->dm.current_turbo_edca) {
853 u8 tmp = AC0_BE; 853 u8 tmp = AC0_BE;
854 rtlpriv->cfg->ops->set_hw_reg(hw, 854 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
855 HW_VAR_AC_PARAM, 855 &tmp);
856 (u8 *)(&tmp));
857 rtlpriv->dm.current_turbo_edca = false; 856 rtlpriv->dm.current_turbo_edca = false;
858 } 857 }
859 } 858 }
@@ -912,7 +911,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
912 for (i = 0; i < OFDM_TABLE_LENGTH; i++) { 911 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
913 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { 912 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
914 ofdm_old[0] = (u8) i; 913 ofdm_old[0] = (u8) i;
915 rtldm->swing_idx_ofdm_base = (u8)i; 914 rtldm->swing_idx_ofdm_base[0] = (u8)i;
916 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 915 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
917 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n", 916 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
918 ROFDM0_XATXIQIMBAL, 917 ROFDM0_XATXIQIMBAL,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
index 557bc5b8327e..4f9376ad4739 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -119,7 +119,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
119 enum version_8188e version, u8 *buffer, u32 size) 119 enum version_8188e version, u8 *buffer, u32 size)
120{ 120{
121 struct rtl_priv *rtlpriv = rtl_priv(hw); 121 struct rtl_priv *rtlpriv = rtl_priv(hw);
122 u8 *buf_ptr = (u8 *)buffer; 122 u8 *buf_ptr = buffer;
123 u32 page_no, remain; 123 u32 page_no, remain;
124 u32 page, offset; 124 u32 page, offset;
125 125
@@ -213,7 +213,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
213 return 1; 213 return 1;
214 214
215 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; 215 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
216 pfwdata = (u8 *)rtlhal->pfirmware; 216 pfwdata = rtlhal->pfirmware;
217 fwsize = rtlhal->fwsize; 217 fwsize = rtlhal->fwsize;
218 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, 218 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
219 "normal Firmware SIZE %d\n", fwsize); 219 "normal Firmware SIZE %d\n", fwsize);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index e06971be7df7..94cd9df98381 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -41,7 +41,6 @@
41#include "fw.h" 41#include "fw.h"
42#include "led.h" 42#include "led.h"
43#include "hw.h" 43#include "hw.h"
44#include "pwrseqcmd.h"
45#include "pwrseq.h" 44#include "pwrseq.h"
46 45
47#define LLT_CONFIG 5 46#define LLT_CONFIG 5
@@ -148,8 +147,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
148 } 147 }
149 148
150 if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) { 149 if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
151 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, 150 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
152 (u8 *)(&rpwm_val));
153 if (FW_PS_IS_ACK(rpwm_val)) { 151 if (FW_PS_IS_ACK(rpwm_val)) {
154 isr_regaddr = REG_HISR; 152 isr_regaddr = REG_HISR;
155 content = rtl_read_dword(rtlpriv, isr_regaddr); 153 content = rtl_read_dword(rtlpriv, isr_regaddr);
@@ -226,7 +224,7 @@ static void _rtl88ee_set_fw_clock_off(struct ieee80211_hw *hw,
226 rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val); 224 rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
227 rtl_write_word(rtlpriv, REG_HISR, 0x0100); 225 rtl_write_word(rtlpriv, REG_HISR, 0x0100);
228 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 226 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
229 (u8 *)(&rpwm_val)); 227 &rpwm_val);
230 spin_lock_bh(&rtlpriv->locks.fw_ps_lock); 228 spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
231 rtlhal->fw_clk_change_in_progress = false; 229 rtlhal->fw_clk_change_in_progress = false;
232 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); 230 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
@@ -274,15 +272,14 @@ static void _rtl88ee_fwlps_leave(struct ieee80211_hw *hw)
274 _rtl88ee_set_fw_clock_on(hw, rpwm_val, false); 272 _rtl88ee_set_fw_clock_on(hw, rpwm_val, false);
275 rtlhal->allow_sw_to_change_hwclc = false; 273 rtlhal->allow_sw_to_change_hwclc = false;
276 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, 274 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
277 (u8 *)(&fw_pwrmode)); 275 &fw_pwrmode);
278 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, 276 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
279 (u8 *)(&fw_current_inps)); 277 (u8 *)(&fw_current_inps));
280 } else { 278 } else {
281 rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */ 279 rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
282 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 280 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
283 (u8 *)(&rpwm_val));
284 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, 281 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
285 (u8 *)(&fw_pwrmode)); 282 &fw_pwrmode);
286 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, 283 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
287 (u8 *)(&fw_current_inps)); 284 (u8 *)(&fw_current_inps));
288 } 285 }
@@ -301,7 +298,7 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
301 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, 298 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
302 (u8 *)(&fw_current_inps)); 299 (u8 *)(&fw_current_inps));
303 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, 300 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
304 (u8 *)(&ppsc->fwctrl_psmode)); 301 &ppsc->fwctrl_psmode);
305 rtlhal->allow_sw_to_change_hwclc = true; 302 rtlhal->allow_sw_to_change_hwclc = true;
306 _rtl88ee_set_fw_clock_off(hw, rpwm_val); 303 _rtl88ee_set_fw_clock_off(hw, rpwm_val);
307 } else { 304 } else {
@@ -309,9 +306,8 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
309 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, 306 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
310 (u8 *)(&fw_current_inps)); 307 (u8 *)(&fw_current_inps));
311 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, 308 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
312 (u8 *)(&ppsc->fwctrl_psmode)); 309 &ppsc->fwctrl_psmode);
313 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 310 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
314 (u8 *)(&rpwm_val));
315 } 311 }
316} 312}
317 313
@@ -420,12 +416,12 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
420 416
421 for (e_aci = 0; e_aci < AC_MAX; e_aci++) { 417 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
422 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, 418 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
423 (u8 *)(&e_aci)); 419 &e_aci);
424 } 420 }
425 break; } 421 break; }
426 case HW_VAR_ACK_PREAMBLE:{ 422 case HW_VAR_ACK_PREAMBLE:{
427 u8 reg_tmp; 423 u8 reg_tmp;
428 u8 short_preamble = (bool) (*(u8 *)val); 424 u8 short_preamble = (bool)*val;
429 reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2); 425 reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
430 if (short_preamble) { 426 if (short_preamble) {
431 reg_tmp |= 0x02; 427 reg_tmp |= 0x02;
@@ -436,13 +432,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
436 } 432 }
437 break; } 433 break; }
438 case HW_VAR_WPA_CONFIG: 434 case HW_VAR_WPA_CONFIG:
439 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val)); 435 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
440 break; 436 break;
441 case HW_VAR_AMPDU_MIN_SPACE:{ 437 case HW_VAR_AMPDU_MIN_SPACE:{
442 u8 min_spacing_to_set; 438 u8 min_spacing_to_set;
443 u8 sec_min_space; 439 u8 sec_min_space;
444 440
445 min_spacing_to_set = *((u8 *)val); 441 min_spacing_to_set = *val;
446 if (min_spacing_to_set <= 7) { 442 if (min_spacing_to_set <= 7) {
447 sec_min_space = 0; 443 sec_min_space = 0;
448 444
@@ -465,7 +461,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
465 case HW_VAR_SHORTGI_DENSITY:{ 461 case HW_VAR_SHORTGI_DENSITY:{
466 u8 density_to_set; 462 u8 density_to_set;
467 463
468 density_to_set = *((u8 *)val); 464 density_to_set = *val;
469 mac->min_space_cfg |= (density_to_set << 3); 465 mac->min_space_cfg |= (density_to_set << 3);
470 466
471 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 467 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -483,7 +479,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
483 479
484 reg = regtoset_normal; 480 reg = regtoset_normal;
485 481
486 factor = *((u8 *)val); 482 factor = *val;
487 if (factor <= 3) { 483 if (factor <= 3) {
488 factor = (1 << (factor + 2)); 484 factor = (1 << (factor + 2));
489 if (factor > 0xf) 485 if (factor > 0xf)
@@ -506,15 +502,15 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
506 } 502 }
507 break; } 503 break; }
508 case HW_VAR_AC_PARAM:{ 504 case HW_VAR_AC_PARAM:{
509 u8 e_aci = *((u8 *)val); 505 u8 e_aci = *val;
510 rtl88e_dm_init_edca_turbo(hw); 506 rtl88e_dm_init_edca_turbo(hw);
511 507
512 if (rtlpci->acm_method != eAcmWay2_SW) 508 if (rtlpci->acm_method != EACMWAY2_SW)
513 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, 509 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
514 (u8 *)(&e_aci)); 510 &e_aci);
515 break; } 511 break; }
516 case HW_VAR_ACM_CTRL:{ 512 case HW_VAR_ACM_CTRL:{
517 u8 e_aci = *((u8 *)val); 513 u8 e_aci = *val;
518 union aci_aifsn *p_aci_aifsn = 514 union aci_aifsn *p_aci_aifsn =
519 (union aci_aifsn *)(&(mac->ac[0].aifs)); 515 (union aci_aifsn *)(&(mac->ac[0].aifs));
520 u8 acm = p_aci_aifsn->f.acm; 516 u8 acm = p_aci_aifsn->f.acm;
@@ -567,7 +563,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
567 rtlpci->receive_config = ((u32 *)(val))[0]; 563 rtlpci->receive_config = ((u32 *)(val))[0];
568 break; 564 break;
569 case HW_VAR_RETRY_LIMIT:{ 565 case HW_VAR_RETRY_LIMIT:{
570 u8 retry_limit = ((u8 *)(val))[0]; 566 u8 retry_limit = *val;
571 567
572 rtl_write_word(rtlpriv, REG_RL, 568 rtl_write_word(rtlpriv, REG_RL,
573 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 569 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -580,7 +576,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
580 rtlefuse->efuse_usedbytes = *((u16 *)val); 576 rtlefuse->efuse_usedbytes = *((u16 *)val);
581 break; 577 break;
582 case HW_VAR_EFUSE_USAGE: 578 case HW_VAR_EFUSE_USAGE:
583 rtlefuse->efuse_usedpercentage = *((u8 *)val); 579 rtlefuse->efuse_usedpercentage = *val;
584 break; 580 break;
585 case HW_VAR_IO_CMD: 581 case HW_VAR_IO_CMD:
586 rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val)); 582 rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val));
@@ -592,15 +588,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
592 udelay(1); 588 udelay(1);
593 589
594 if (rpwm_val & BIT(7)) { 590 if (rpwm_val & BIT(7)) {
595 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 591 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
596 (*(u8 *)val));
597 } else { 592 } else {
598 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 593 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
599 ((*(u8 *)val) | BIT(7)));
600 } 594 }
601 break; } 595 break; }
602 case HW_VAR_H2C_FW_PWRMODE: 596 case HW_VAR_H2C_FW_PWRMODE:
603 rtl88e_set_fw_pwrmode_cmd(hw, (*(u8 *)val)); 597 rtl88e_set_fw_pwrmode_cmd(hw, *val);
604 break; 598 break;
605 case HW_VAR_FW_PSMODE_STATUS: 599 case HW_VAR_FW_PSMODE_STATUS:
606 ppsc->fw_current_inpsmode = *((bool *)val); 600 ppsc->fw_current_inpsmode = *((bool *)val);
@@ -617,7 +611,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
617 _rtl88ee_fwlps_leave(hw); 611 _rtl88ee_fwlps_leave(hw);
618 break; } 612 break; }
619 case HW_VAR_H2C_FW_JOINBSSRPT:{ 613 case HW_VAR_H2C_FW_JOINBSSRPT:{
620 u8 mstatus = (*(u8 *)val); 614 u8 mstatus = *val;
621 u8 tmp, tmp_reg422, uval; 615 u8 tmp, tmp_reg422, uval;
622 u8 count = 0, dlbcn_count = 0; 616 u8 count = 0, dlbcn_count = 0;
623 bool recover = false; 617 bool recover = false;
@@ -668,10 +662,10 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
668 } 662 }
669 rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0)))); 663 rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0))));
670 } 664 }
671 rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val)); 665 rtl88e_set_fw_joinbss_report_cmd(hw, *val);
672 break; } 666 break; }
673 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: 667 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
674 rtl88e_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); 668 rtl88e_set_p2p_ps_offload_cmd(hw, *val);
675 break; 669 break;
676 case HW_VAR_AID:{ 670 case HW_VAR_AID:{
677 u16 u2btmp; 671 u16 u2btmp;
@@ -681,7 +675,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
681 mac->assoc_id)); 675 mac->assoc_id));
682 break; } 676 break; }
683 case HW_VAR_CORRECT_TSF:{ 677 case HW_VAR_CORRECT_TSF:{
684 u8 btype_ibss = ((u8 *)(val))[0]; 678 u8 btype_ibss = *val;
685 679
686 if (btype_ibss == true) 680 if (btype_ibss == true)
687 _rtl88ee_stop_tx_beacon(hw); 681 _rtl88ee_stop_tx_beacon(hw);
@@ -815,11 +809,11 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
815 809
816 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); 810 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
817 /* HW Power on sequence */ 811 /* HW Power on sequence */
818 if (!rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, 812 if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
819 PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, 813 PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
820 Rtl8188E_NIC_ENABLE_FLOW)) { 814 Rtl8188E_NIC_ENABLE_FLOW)) {
821 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 815 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
822 "init MAC Fail as rtl88_hal_pwrseqcmdparsing\n"); 816 "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
823 return false; 817 return false;
824 } 818 }
825 819
@@ -1025,9 +1019,20 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
1025 bool rtstatus = true; 1019 bool rtstatus = true;
1026 int err = 0; 1020 int err = 0;
1027 u8 tmp_u1b, u1byte; 1021 u8 tmp_u1b, u1byte;
1022 unsigned long flags;
1028 1023
1029 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n"); 1024 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n");
1030 rtlpriv->rtlhal.being_init_adapter = true; 1025 rtlpriv->rtlhal.being_init_adapter = true;
1026 /* As this function can take a very long time (up to 350 ms)
1027 * and can be called with irqs disabled, reenable the irqs
1028 * to let the other devices continue being serviced.
1029 *
1030 * It is safe doing so since our own interrupts will only be enabled
1031 * in a subsequent step.
1032 */
1033 local_save_flags(flags);
1034 local_irq_enable();
1035
1031 rtlpriv->intf_ops->disable_aspm(hw); 1036 rtlpriv->intf_ops->disable_aspm(hw);
1032 1037
1033 tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1); 1038 tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
@@ -1043,7 +1048,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
1043 if (rtstatus != true) { 1048 if (rtstatus != true) {
1044 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); 1049 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
1045 err = 1; 1050 err = 1;
1046 return err; 1051 goto exit;
1047 } 1052 }
1048 1053
1049 err = rtl88e_download_fw(hw, false); 1054 err = rtl88e_download_fw(hw, false);
@@ -1051,8 +1056,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
1051 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 1056 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1052 "Failed to download FW. Init HW without FW now..\n"); 1057 "Failed to download FW. Init HW without FW now..\n");
1053 err = 1; 1058 err = 1;
1054 rtlhal->fw_ready = false; 1059 goto exit;
1055 return err;
1056 } else { 1060 } else {
1057 rtlhal->fw_ready = true; 1061 rtlhal->fw_ready = true;
1058 } 1062 }
@@ -1097,7 +1101,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
1097 if (ppsc->rfpwr_state == ERFON) { 1101 if (ppsc->rfpwr_state == ERFON) {
1098 if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) || 1102 if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) ||
1099 ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) && 1103 ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) &&
1100 (rtlhal->oem_id == RT_CID_819x_HP))) { 1104 (rtlhal->oem_id == RT_CID_819X_HP))) {
1101 rtl88e_phy_set_rfpath_switch(hw, true); 1105 rtl88e_phy_set_rfpath_switch(hw, true);
1102 rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT; 1106 rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT;
1103 } else { 1107 } else {
@@ -1135,10 +1139,12 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
1135 } 1139 }
1136 rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128)); 1140 rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
1137 rtl88e_dm_init(hw); 1141 rtl88e_dm_init(hw);
1142exit:
1143 local_irq_restore(flags);
1138 rtlpriv->rtlhal.being_init_adapter = false; 1144 rtlpriv->rtlhal.being_init_adapter = false;
1139 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n", 1145 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n",
1140 err); 1146 err);
1141 return 0; 1147 return err;
1142} 1148}
1143 1149
1144static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw) 1150static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
@@ -1235,12 +1241,13 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
1235void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1241void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1236{ 1242{
1237 struct rtl_priv *rtlpriv = rtl_priv(hw); 1243 struct rtl_priv *rtlpriv = rtl_priv(hw);
1238 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1244 u32 reg_rcr;
1239 u32 reg_rcr = rtlpci->receive_config;
1240 1245
1241 if (rtlpriv->psc.rfpwr_state != ERFON) 1246 if (rtlpriv->psc.rfpwr_state != ERFON)
1242 return; 1247 return;
1243 1248
1249 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1250
1244 if (check_bssid == true) { 1251 if (check_bssid == true) {
1245 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1252 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1246 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1253 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1345,9 +1352,9 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
1345 } 1352 }
1346 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF); 1353 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
1347 1354
1348 rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, 1355 rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
1349 PWR_INTF_PCI_MSK, 1356 PWR_INTF_PCI_MSK,
1350 Rtl8188E_NIC_LPS_ENTER_FLOW); 1357 Rtl8188E_NIC_LPS_ENTER_FLOW);
1351 1358
1352 rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); 1359 rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
1353 1360
@@ -1361,8 +1368,8 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
1361 u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL); 1368 u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL);
1362 rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0)))); 1369 rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0))));
1363 1370
1364 rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, 1371 rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
1365 PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW); 1372 PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
1366 1373
1367 u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); 1374 u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
1368 rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3)))); 1375 rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
@@ -1816,7 +1823,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1816 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1823 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1817 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); 1824 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
1818 /*customer ID*/ 1825 /*customer ID*/
1819 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1826 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
1820 if (rtlefuse->eeprom_oemid == 0xFF) 1827 if (rtlefuse->eeprom_oemid == 0xFF)
1821 rtlefuse->eeprom_oemid = 0; 1828 rtlefuse->eeprom_oemid = 0;
1822 1829
@@ -1833,7 +1840,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1833 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1840 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1834 "dev_addr: %pM\n", rtlefuse->dev_addr); 1841 "dev_addr: %pM\n", rtlefuse->dev_addr);
1835 /*channel plan */ 1842 /*channel plan */
1836 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1843 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
1837 /* set channel paln to world wide 13 */ 1844 /* set channel paln to world wide 13 */
1838 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; 1845 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
1839 /*tx power*/ 1846 /*tx power*/
@@ -1845,7 +1852,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1845 rtlefuse->autoload_failflag, 1852 rtlefuse->autoload_failflag,
1846 hwinfo); 1853 hwinfo);
1847 /*board type*/ 1854 /*board type*/
1848 rtlefuse->board_type = (((*(u8 *)&hwinfo[jj]) & 0xE0) >> 5); 1855 rtlefuse->board_type = (hwinfo[jj] & 0xE0) >> 5;
1849 /*Wake on wlan*/ 1856 /*Wake on wlan*/
1850 rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6); 1857 rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6);
1851 /*parse xtal*/ 1858 /*parse xtal*/
@@ -1872,15 +1879,15 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1872 case EEPROM_CID_DEFAULT: 1879 case EEPROM_CID_DEFAULT:
1873 if (rtlefuse->eeprom_did == 0x8179) { 1880 if (rtlefuse->eeprom_did == 0x8179) {
1874 if (rtlefuse->eeprom_svid == 0x1025) { 1881 if (rtlefuse->eeprom_svid == 0x1025) {
1875 rtlhal->oem_id = RT_CID_819x_Acer; 1882 rtlhal->oem_id = RT_CID_819X_ACER;
1876 } else if ((rtlefuse->eeprom_svid == 0x10EC && 1883 } else if ((rtlefuse->eeprom_svid == 0x10EC &&
1877 rtlefuse->eeprom_smid == 0x0179) || 1884 rtlefuse->eeprom_smid == 0x0179) ||
1878 (rtlefuse->eeprom_svid == 0x17AA && 1885 (rtlefuse->eeprom_svid == 0x17AA &&
1879 rtlefuse->eeprom_smid == 0x0179)) { 1886 rtlefuse->eeprom_smid == 0x0179)) {
1880 rtlhal->oem_id = RT_CID_819x_Lenovo; 1887 rtlhal->oem_id = RT_CID_819X_LENOVO;
1881 } else if (rtlefuse->eeprom_svid == 0x103c && 1888 } else if (rtlefuse->eeprom_svid == 0x103c &&
1882 rtlefuse->eeprom_smid == 0x197d) { 1889 rtlefuse->eeprom_smid == 0x197d) {
1883 rtlhal->oem_id = RT_CID_819x_HP; 1890 rtlhal->oem_id = RT_CID_819X_HP;
1884 } else { 1891 } else {
1885 rtlhal->oem_id = RT_CID_DEFAULT; 1892 rtlhal->oem_id = RT_CID_DEFAULT;
1886 } 1893 }
@@ -1892,7 +1899,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1892 rtlhal->oem_id = RT_CID_TOSHIBA; 1899 rtlhal->oem_id = RT_CID_TOSHIBA;
1893 break; 1900 break;
1894 case EEPROM_CID_QMI: 1901 case EEPROM_CID_QMI:
1895 rtlhal->oem_id = RT_CID_819x_QMI; 1902 rtlhal->oem_id = RT_CID_819X_QMI;
1896 break; 1903 break;
1897 case EEPROM_CID_WHQL: 1904 case EEPROM_CID_WHQL:
1898 default: 1905 default:
@@ -1911,14 +1918,14 @@ static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
1911 pcipriv->ledctl.led_opendrain = true; 1918 pcipriv->ledctl.led_opendrain = true;
1912 1919
1913 switch (rtlhal->oem_id) { 1920 switch (rtlhal->oem_id) {
1914 case RT_CID_819x_HP: 1921 case RT_CID_819X_HP:
1915 pcipriv->ledctl.led_opendrain = true; 1922 pcipriv->ledctl.led_opendrain = true;
1916 break; 1923 break;
1917 case RT_CID_819x_Lenovo: 1924 case RT_CID_819X_LENOVO:
1918 case RT_CID_DEFAULT: 1925 case RT_CID_DEFAULT:
1919 case RT_CID_TOSHIBA: 1926 case RT_CID_TOSHIBA:
1920 case RT_CID_CCX: 1927 case RT_CID_CCX:
1921 case RT_CID_819x_Acer: 1928 case RT_CID_819X_ACER:
1922 case RT_CID_WHQL: 1929 case RT_CID_WHQL:
1923 default: 1930 default:
1924 break; 1931 break;
@@ -2211,8 +2218,7 @@ void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw)
2211 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 2218 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2212 u16 sifs_timer; 2219 u16 sifs_timer;
2213 2220
2214 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2221 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
2215 (u8 *)&mac->slot_time);
2216 if (!mac->ht_enable) 2222 if (!mac->ht_enable)
2217 sifs_timer = 0x0a0a; 2223 sifs_timer = 0x0a0a;
2218 else 2224 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index d67f9c731cc4..1cd6c16d597e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -29,6 +29,7 @@
29 29
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../core.h"
32#include "../ps.h" 33#include "../ps.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
@@ -151,18 +152,7 @@ static bool config_bb_with_pgheader(struct ieee80211_hw *hw,
151 v2 = table_pg[i + 1]; 152 v2 = table_pg[i + 1];
152 153
153 if (v1 < 0xcdcdcdcd) { 154 if (v1 < 0xcdcdcdcd) {
154 if (table_pg[i] == 0xfe) 155 rtl_addr_delay(table_pg[i]);
155 mdelay(50);
156 else if (table_pg[i] == 0xfd)
157 mdelay(5);
158 else if (table_pg[i] == 0xfc)
159 mdelay(1);
160 else if (table_pg[i] == 0xfb)
161 udelay(50);
162 else if (table_pg[i] == 0xfa)
163 udelay(5);
164 else if (table_pg[i] == 0xf9)
165 udelay(1);
166 156
167 store_pwrindex_offset(hw, table_pg[i], 157 store_pwrindex_offset(hw, table_pg[i],
168 table_pg[i + 1], 158 table_pg[i + 1],
@@ -672,24 +662,9 @@ static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw,
672 u32 addr, u32 data, enum radio_path rfpath, 662 u32 addr, u32 data, enum radio_path rfpath,
673 u32 regaddr) 663 u32 regaddr)
674{ 664{
675 if (addr == 0xffe) { 665 rtl_rfreg_delay(hw, rfpath, regaddr,
676 mdelay(50); 666 RFREG_OFFSET_MASK,
677 } else if (addr == 0xfd) { 667 data);
678 mdelay(5);
679 } else if (addr == 0xfc) {
680 mdelay(1);
681 } else if (addr == 0xfb) {
682 udelay(50);
683 } else if (addr == 0xfa) {
684 udelay(5);
685 } else if (addr == 0xf9) {
686 udelay(1);
687 } else {
688 rtl_set_rfreg(hw, rfpath, regaddr,
689 RFREG_OFFSET_MASK,
690 data);
691 udelay(1);
692 }
693} 668}
694 669
695static void rtl88_config_s(struct ieee80211_hw *hw, 670static void rtl88_config_s(struct ieee80211_hw *hw,
@@ -702,28 +677,6 @@ static void rtl88_config_s(struct ieee80211_hw *hw,
702 addr | maskforphyset); 677 addr | maskforphyset);
703} 678}
704 679
705static void _rtl8188e_config_bb_reg(struct ieee80211_hw *hw,
706 u32 addr, u32 data)
707{
708 if (addr == 0xfe) {
709 mdelay(50);
710 } else if (addr == 0xfd) {
711 mdelay(5);
712 } else if (addr == 0xfc) {
713 mdelay(1);
714 } else if (addr == 0xfb) {
715 udelay(50);
716 } else if (addr == 0xfa) {
717 udelay(5);
718 } else if (addr == 0xf9) {
719 udelay(1);
720 } else {
721 rtl_set_bbreg(hw, addr, MASKDWORD, data);
722 udelay(1);
723 }
724}
725
726
727#define NEXT_PAIR(v1, v2, i) \ 680#define NEXT_PAIR(v1, v2, i) \
728 do { \ 681 do { \
729 i += 2; v1 = array_table[i]; \ 682 i += 2; v1 = array_table[i]; \
@@ -795,7 +748,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
795 v1 = array_table[i]; 748 v1 = array_table[i];
796 v2 = array_table[i + 1]; 749 v2 = array_table[i + 1];
797 if (v1 < 0xcdcdcdcd) { 750 if (v1 < 0xcdcdcdcd) {
798 _rtl8188e_config_bb_reg(hw, v1, v2); 751 rtl_bb_delay(hw, v1, v2);
799 } else {/*This line is the start line of branch.*/ 752 } else {/*This line is the start line of branch.*/
800 if (!check_cond(hw, array_table[i])) { 753 if (!check_cond(hw, array_table[i])) {
801 /*Discard the following (offset, data) pairs*/ 754 /*Discard the following (offset, data) pairs*/
@@ -811,7 +764,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
811 while (v2 != 0xDEAD && 764 while (v2 != 0xDEAD &&
812 v2 != 0xCDEF && 765 v2 != 0xCDEF &&
813 v2 != 0xCDCD && i < arraylen - 2) { 766 v2 != 0xCDCD && i < arraylen - 2) {
814 _rtl8188e_config_bb_reg(hw, v1, v2); 767 rtl_bb_delay(hw, v1, v2);
815 NEXT_PAIR(v1, v2, i); 768 NEXT_PAIR(v1, v2, i);
816 } 769 }
817 770
@@ -1002,7 +955,7 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
1002 } 955 }
1003 } 956 }
1004 957
1005 if (rtlhal->oem_id == RT_CID_819x_HP) 958 if (rtlhal->oem_id == RT_CID_819X_HP)
1006 rtl88_config_s(hw, 0x52, 0x7E4BD); 959 rtl88_config_s(hw, 0x52, 0x7E4BD);
1007 960
1008 break; 961 break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
index 028ec6dd52b4..32e135ab9a63 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -30,7 +30,6 @@
30#ifndef __RTL8723E_PWRSEQ_H__ 30#ifndef __RTL8723E_PWRSEQ_H__
31#define __RTL8723E_PWRSEQ_H__ 31#define __RTL8723E_PWRSEQ_H__
32 32
33#include "pwrseqcmd.h"
34/* 33/*
35 Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd 34 Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
36 There are 6 HW Power States: 35 There are 6 HW Power States:
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
index d849abf7d94a..7af85cfa8f87 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
@@ -2215,22 +2215,6 @@
2215#define BWORD1 0xc 2215#define BWORD1 0xc
2216#define BWORD 0xf 2216#define BWORD 0xf
2217 2217
2218#define MASKBYTE0 0xff
2219#define MASKBYTE1 0xff00
2220#define MASKBYTE2 0xff0000
2221#define MASKBYTE3 0xff000000
2222#define MASKHWORD 0xffff0000
2223#define MASKLWORD 0x0000ffff
2224#define MASKDWORD 0xffffffff
2225#define MASK12BITS 0xfff
2226#define MASKH4BITS 0xf0000000
2227#define MASKOFDM_D 0xffc00000
2228#define MASKCCK 0x3f3f3f3f
2229
2230#define MASK4BITS 0x0f
2231#define MASK20BITS 0xfffff
2232#define RFREG_OFFSET_MASK 0xfffff
2233
2234#define BENABLE 0x1 2218#define BENABLE 0x1
2235#define BDISABLE 0x0 2219#define BDISABLE 0x0
2236 2220
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 347af1e4f438..1b4101bf9974 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,6 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
93 u8 tid; 93 u8 tid;
94 94
95 rtl8188ee_bt_reg_init(hw); 95 rtl8188ee_bt_reg_init(hw);
96 rtlpci->msi_support = true;
96 97
97 rtlpriv->dm.dm_initialgain_enable = 1; 98 rtlpriv->dm.dm_initialgain_enable = 1;
98 rtlpriv->dm.dm_flag = 0; 99 rtlpriv->dm.dm_flag = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index aece6c9cccf1..06ef47cd6203 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
452 /* During testing, hdr was NULL */ 452 /* During testing, hdr was NULL */
453 return false; 453 return false;
454 } 454 }
455 if ((ieee80211_is_robust_mgmt_frame(hdr)) && 455 if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
456 (ieee80211_has_protected(hdr->frame_control))) 456 (ieee80211_has_protected(hdr->frame_control)))
457 rx_status->flag &= ~RX_FLAG_DECRYPTED; 457 rx_status->flag &= ~RX_FLAG_DECRYPTED;
458 else 458 else
@@ -489,16 +489,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
489 489
490void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, 490void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
491 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 491 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
492 struct ieee80211_tx_info *info, 492 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
493 struct ieee80211_sta *sta, 493 struct ieee80211_sta *sta, struct sk_buff *skb,
494 struct sk_buff *skb,
495 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 494 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
496{ 495{
497 struct rtl_priv *rtlpriv = rtl_priv(hw); 496 struct rtl_priv *rtlpriv = rtl_priv(hw);
498 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 497 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
499 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 498 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
500 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 499 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
501 u8 *pdesc = (u8 *)pdesc_tx; 500 u8 *pdesc = pdesc_tx;
502 u16 seq_number; 501 u16 seq_number;
503 __le16 fc = hdr->frame_control; 502 __le16 fc = hdr->frame_control;
504 unsigned int buf_len = 0; 503 unsigned int buf_len = 0;
@@ -717,7 +716,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
717 716
718 SET_TX_DESC_OWN(pdesc, 1); 717 SET_TX_DESC_OWN(pdesc, 1);
719 718
720 SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); 719 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
721 720
722 SET_TX_DESC_FIRST_SEG(pdesc, 1); 721 SET_TX_DESC_FIRST_SEG(pdesc, 1);
723 SET_TX_DESC_LAST_SEG(pdesc, 1); 722 SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -734,7 +733,8 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
734 pdesc, TX_DESC_SIZE); 733 pdesc, TX_DESC_SIZE);
735} 734}
736 735
737void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) 736void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
737 u8 desc_name, u8 *val)
738{ 738{
739 if (istx == true) { 739 if (istx == true) {
740 switch (desc_name) { 740 switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
index 21ca33a7c770..8c2609412d2c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
@@ -777,15 +777,15 @@ struct rx_desc_88e {
777 777
778void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, 778void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
779 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 779 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
780 struct ieee80211_tx_info *info, 780 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
781 struct ieee80211_sta *sta, 781 struct ieee80211_sta *sta, struct sk_buff *skb,
782 struct sk_buff *skb,
783 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); 782 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
784bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, 783bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
785 struct rtl_stats *status, 784 struct rtl_stats *status,
786 struct ieee80211_rx_status *rx_status, 785 struct ieee80211_rx_status *rx_status,
787 u8 *pdesc, struct sk_buff *skb); 786 u8 *pdesc, struct sk_buff *skb);
788void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); 787void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
788 u8 desc_name, u8 *val);
789u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name); 789u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
790void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); 790void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
791void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 791void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 2eb0b38384dd..55adf043aef7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -319,7 +319,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
319 u8 e_aci = *(val); 319 u8 e_aci = *(val);
320 rtl92c_dm_init_edca_turbo(hw); 320 rtl92c_dm_init_edca_turbo(hw);
321 321
322 if (rtlpci->acm_method != eAcmWay2_SW) 322 if (rtlpci->acm_method != EACMWAY2_SW)
323 rtlpriv->cfg->ops->set_hw_reg(hw, 323 rtlpriv->cfg->ops->set_hw_reg(hw,
324 HW_VAR_ACM_CTRL, 324 HW_VAR_ACM_CTRL,
325 (&e_aci)); 325 (&e_aci));
@@ -476,7 +476,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
476 break; 476 break;
477 } 477 }
478 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: 478 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
479 rtl92c_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); 479 rtl92c_set_p2p_ps_offload_cmd(hw, *val);
480 break; 480 break;
481 case HW_VAR_AID:{ 481 case HW_VAR_AID:{
482 u16 u2btmp; 482 u16 u2btmp;
@@ -521,30 +521,32 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
521 (u8 *)(&fw_current_inps)); 521 (u8 *)(&fw_current_inps));
522 rtlpriv->cfg->ops->set_hw_reg(hw, 522 rtlpriv->cfg->ops->set_hw_reg(hw,
523 HW_VAR_H2C_FW_PWRMODE, 523 HW_VAR_H2C_FW_PWRMODE,
524 (u8 *)(&ppsc->fwctrl_psmode)); 524 &ppsc->fwctrl_psmode);
525 525
526 rtlpriv->cfg->ops->set_hw_reg(hw, 526 rtlpriv->cfg->ops->set_hw_reg(hw,
527 HW_VAR_SET_RPWM, 527 HW_VAR_SET_RPWM,
528 (u8 *)(&rpwm_val)); 528 &rpwm_val);
529 } else { 529 } else {
530 rpwm_val = 0x0C; /* RF on */ 530 rpwm_val = 0x0C; /* RF on */
531 fw_pwrmode = FW_PS_ACTIVE_MODE; 531 fw_pwrmode = FW_PS_ACTIVE_MODE;
532 fw_current_inps = false; 532 fw_current_inps = false;
533 rtlpriv->cfg->ops->set_hw_reg(hw, 533 rtlpriv->cfg->ops->set_hw_reg(hw,
534 HW_VAR_SET_RPWM, 534 HW_VAR_SET_RPWM,
535 (u8 *)(&rpwm_val)); 535 &rpwm_val);
536 rtlpriv->cfg->ops->set_hw_reg(hw, 536 rtlpriv->cfg->ops->set_hw_reg(hw,
537 HW_VAR_H2C_FW_PWRMODE, 537 HW_VAR_H2C_FW_PWRMODE,
538 (u8 *)(&fw_pwrmode)); 538 &fw_pwrmode);
539 539
540 rtlpriv->cfg->ops->set_hw_reg(hw, 540 rtlpriv->cfg->ops->set_hw_reg(hw,
541 HW_VAR_FW_PSMODE_STATUS, 541 HW_VAR_FW_PSMODE_STATUS,
542 (u8 *)(&fw_current_inps)); 542 (u8 *)(&fw_current_inps));
543 } 543 }
544 break; } 544 break; }
545 case HW_VAR_KEEP_ALIVE:
546 break;
545 default: 547 default:
546 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 548 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
547 "switch case not processed\n"); 549 "switch case %d not processed\n", variable);
548 break; 550 break;
549 } 551 }
550} 552}
@@ -1214,11 +1216,13 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1214void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1216void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1215{ 1217{
1216 struct rtl_priv *rtlpriv = rtl_priv(hw); 1218 struct rtl_priv *rtlpriv = rtl_priv(hw);
1217 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); 1219 u32 reg_rcr;
1218 1220
1219 if (rtlpriv->psc.rfpwr_state != ERFON) 1221 if (rtlpriv->psc.rfpwr_state != ERFON)
1220 return; 1222 return;
1221 1223
1224 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1225
1222 if (check_bssid) { 1226 if (check_bssid) {
1223 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1227 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1224 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1228 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1734,7 +1738,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1734 if (rtlefuse->eeprom_did == 0x8176) { 1738 if (rtlefuse->eeprom_did == 0x8176) {
1735 if ((rtlefuse->eeprom_svid == 0x103C && 1739 if ((rtlefuse->eeprom_svid == 0x103C &&
1736 rtlefuse->eeprom_smid == 0x1629)) 1740 rtlefuse->eeprom_smid == 0x1629))
1737 rtlhal->oem_id = RT_CID_819x_HP; 1741 rtlhal->oem_id = RT_CID_819X_HP;
1738 else 1742 else
1739 rtlhal->oem_id = RT_CID_DEFAULT; 1743 rtlhal->oem_id = RT_CID_DEFAULT;
1740 } else { 1744 } else {
@@ -1745,7 +1749,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1745 rtlhal->oem_id = RT_CID_TOSHIBA; 1749 rtlhal->oem_id = RT_CID_TOSHIBA;
1746 break; 1750 break;
1747 case EEPROM_CID_QMI: 1751 case EEPROM_CID_QMI:
1748 rtlhal->oem_id = RT_CID_819x_QMI; 1752 rtlhal->oem_id = RT_CID_819X_QMI;
1749 break; 1753 break;
1750 case EEPROM_CID_WHQL: 1754 case EEPROM_CID_WHQL:
1751 default: 1755 default:
@@ -1764,14 +1768,14 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
1764 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1768 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1765 1769
1766 switch (rtlhal->oem_id) { 1770 switch (rtlhal->oem_id) {
1767 case RT_CID_819x_HP: 1771 case RT_CID_819X_HP:
1768 pcipriv->ledctl.led_opendrain = true; 1772 pcipriv->ledctl.led_opendrain = true;
1769 break; 1773 break;
1770 case RT_CID_819x_Lenovo: 1774 case RT_CID_819X_LENOVO:
1771 case RT_CID_DEFAULT: 1775 case RT_CID_DEFAULT:
1772 case RT_CID_TOSHIBA: 1776 case RT_CID_TOSHIBA:
1773 case RT_CID_CCX: 1777 case RT_CID_CCX:
1774 case RT_CID_819x_Acer: 1778 case RT_CID_819X_ACER:
1775 case RT_CID_WHQL: 1779 case RT_CID_WHQL:
1776 default: 1780 default:
1777 break; 1781 break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 73262ca3864b..98b22303c84d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../ps.h" 32#include "../ps.h"
33#include "../core.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "hw.h" 36#include "hw.h"
@@ -198,18 +199,7 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
198 } 199 }
199 if (configtype == BASEBAND_CONFIG_PHY_REG) { 200 if (configtype == BASEBAND_CONFIG_PHY_REG) {
200 for (i = 0; i < phy_reg_arraylen; i = i + 2) { 201 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
201 if (phy_regarray_table[i] == 0xfe) 202 rtl_addr_delay(phy_regarray_table[i]);
202 mdelay(50);
203 else if (phy_regarray_table[i] == 0xfd)
204 mdelay(5);
205 else if (phy_regarray_table[i] == 0xfc)
206 mdelay(1);
207 else if (phy_regarray_table[i] == 0xfb)
208 udelay(50);
209 else if (phy_regarray_table[i] == 0xfa)
210 udelay(5);
211 else if (phy_regarray_table[i] == 0xf9)
212 udelay(1);
213 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD, 203 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
214 phy_regarray_table[i + 1]); 204 phy_regarray_table[i + 1]);
215 udelay(1); 205 udelay(1);
@@ -245,18 +235,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
245 235
246 if (configtype == BASEBAND_CONFIG_PHY_REG) { 236 if (configtype == BASEBAND_CONFIG_PHY_REG) {
247 for (i = 0; i < phy_regarray_pg_len; i = i + 3) { 237 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
248 if (phy_regarray_table_pg[i] == 0xfe) 238 rtl_addr_delay(phy_regarray_table_pg[i]);
249 mdelay(50);
250 else if (phy_regarray_table_pg[i] == 0xfd)
251 mdelay(5);
252 else if (phy_regarray_table_pg[i] == 0xfc)
253 mdelay(1);
254 else if (phy_regarray_table_pg[i] == 0xfb)
255 udelay(50);
256 else if (phy_regarray_table_pg[i] == 0xfa)
257 udelay(5);
258 else if (phy_regarray_table_pg[i] == 0xf9)
259 udelay(1);
260 239
261 _rtl92c_store_pwrIndex_diffrate_offset(hw, 240 _rtl92c_store_pwrIndex_diffrate_offset(hw,
262 phy_regarray_table_pg[i], 241 phy_regarray_table_pg[i],
@@ -305,46 +284,16 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
305 switch (rfpath) { 284 switch (rfpath) {
306 case RF90_PATH_A: 285 case RF90_PATH_A:
307 for (i = 0; i < radioa_arraylen; i = i + 2) { 286 for (i = 0; i < radioa_arraylen; i = i + 2) {
308 if (radioa_array_table[i] == 0xfe) 287 rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
309 mdelay(50); 288 RFREG_OFFSET_MASK,
310 else if (radioa_array_table[i] == 0xfd) 289 radioa_array_table[i + 1]);
311 mdelay(5);
312 else if (radioa_array_table[i] == 0xfc)
313 mdelay(1);
314 else if (radioa_array_table[i] == 0xfb)
315 udelay(50);
316 else if (radioa_array_table[i] == 0xfa)
317 udelay(5);
318 else if (radioa_array_table[i] == 0xf9)
319 udelay(1);
320 else {
321 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
322 RFREG_OFFSET_MASK,
323 radioa_array_table[i + 1]);
324 udelay(1);
325 }
326 } 290 }
327 break; 291 break;
328 case RF90_PATH_B: 292 case RF90_PATH_B:
329 for (i = 0; i < radiob_arraylen; i = i + 2) { 293 for (i = 0; i < radiob_arraylen; i = i + 2) {
330 if (radiob_array_table[i] == 0xfe) { 294 rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
331 mdelay(50); 295 RFREG_OFFSET_MASK,
332 } else if (radiob_array_table[i] == 0xfd) 296 radiob_array_table[i + 1]);
333 mdelay(5);
334 else if (radiob_array_table[i] == 0xfc)
335 mdelay(1);
336 else if (radiob_array_table[i] == 0xfb)
337 udelay(50);
338 else if (radiob_array_table[i] == 0xfa)
339 udelay(5);
340 else if (radiob_array_table[i] == 0xf9)
341 udelay(1);
342 else {
343 rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
344 RFREG_OFFSET_MASK,
345 radiob_array_table[i + 1]);
346 udelay(1);
347 }
348 } 297 }
349 break; 298 break;
350 case RF90_PATH_C: 299 case RF90_PATH_C:
@@ -355,6 +304,8 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
355 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 304 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
356 "switch case not processed\n"); 305 "switch case not processed\n");
357 break; 306 break;
307 default:
308 break;
358 } 309 }
359 return true; 310 return true;
360} 311}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 8922ecb47ad2..ed703a1b3b7c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -2044,22 +2044,6 @@
2044#define BWORD1 0xc 2044#define BWORD1 0xc
2045#define BWORD 0xf 2045#define BWORD 0xf
2046 2046
2047#define MASKBYTE0 0xff
2048#define MASKBYTE1 0xff00
2049#define MASKBYTE2 0xff0000
2050#define MASKBYTE3 0xff000000
2051#define MASKHWORD 0xffff0000
2052#define MASKLWORD 0x0000ffff
2053#define MASKDWORD 0xffffffff
2054#define MASK12BITS 0xfff
2055#define MASKH4BITS 0xf0000000
2056#define MASKOFDM_D 0xffc00000
2057#define MASKCCK 0x3f3f3f3f
2058
2059#define MASK4BITS 0x0f
2060#define MASK20BITS 0xfffff
2061#define RFREG_OFFSET_MASK 0xfffff
2062
2063#define BENABLE 0x1 2047#define BENABLE 0x1
2064#define BDISABLE 0x0 2048#define BDISABLE 0x0
2065 2049
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52abf0a862fa..8f04817cb7ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
393 /* In testing, hdr was NULL here */ 393 /* In testing, hdr was NULL here */
394 return false; 394 return false;
395 } 395 }
396 if ((ieee80211_is_robust_mgmt_frame(hdr)) && 396 if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
397 (ieee80211_has_protected(hdr->frame_control))) 397 (ieee80211_has_protected(hdr->frame_control)))
398 rx_status->flag &= ~RX_FLAG_DECRYPTED; 398 rx_status->flag &= ~RX_FLAG_DECRYPTED;
399 else 399 else
@@ -426,7 +426,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
426 426
427void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 427void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
428 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 428 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
429 struct ieee80211_tx_info *info, 429 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
430 struct ieee80211_sta *sta, 430 struct ieee80211_sta *sta,
431 struct sk_buff *skb, 431 struct sk_buff *skb,
432 u8 hw_queue, struct rtl_tcb_desc *tcb_desc) 432 u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
@@ -666,7 +666,8 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
666 "H2C Tx Cmd Content", pdesc, TX_DESC_SIZE); 666 "H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
667} 667}
668 668
669void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) 669void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
670 u8 desc_name, u8 *val)
670{ 671{
671 if (istx) { 672 if (istx) {
672 switch (desc_name) { 673 switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index a7cdd514cb2e..9a39ec4204dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -711,8 +711,8 @@ struct rx_desc_92c {
711} __packed; 711} __packed;
712 712
713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
714 struct ieee80211_hdr *hdr, 714 struct ieee80211_hdr *hdr, u8 *pdesc,
715 u8 *pdesc, struct ieee80211_tx_info *info, 715 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
716 struct ieee80211_sta *sta, 716 struct ieee80211_sta *sta,
717 struct sk_buff *skb, u8 hw_queue, 717 struct sk_buff *skb, u8 hw_queue,
718 struct rtl_tcb_desc *ptcb_desc); 718 struct rtl_tcb_desc *ptcb_desc);
@@ -720,7 +720,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
720 struct rtl_stats *stats, 720 struct rtl_stats *stats,
721 struct ieee80211_rx_status *rx_status, 721 struct ieee80211_rx_status *rx_status,
722 u8 *pdesc, struct sk_buff *skb); 722 u8 *pdesc, struct sk_buff *skb);
723void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); 723void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
724 u8 desc_name, u8 *val);
724u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name); 725u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
725void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); 726void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
726void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 727void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 468bf73cc883..68b5c7e92cfb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -394,7 +394,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
394 if (rtlefuse->eeprom_did == 0x8176) { 394 if (rtlefuse->eeprom_did == 0x8176) {
395 if ((rtlefuse->eeprom_svid == 0x103C && 395 if ((rtlefuse->eeprom_svid == 0x103C &&
396 rtlefuse->eeprom_smid == 0x1629)) 396 rtlefuse->eeprom_smid == 0x1629))
397 rtlhal->oem_id = RT_CID_819x_HP; 397 rtlhal->oem_id = RT_CID_819X_HP;
398 else 398 else
399 rtlhal->oem_id = RT_CID_DEFAULT; 399 rtlhal->oem_id = RT_CID_DEFAULT;
400 } else { 400 } else {
@@ -405,7 +405,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
405 rtlhal->oem_id = RT_CID_TOSHIBA; 405 rtlhal->oem_id = RT_CID_TOSHIBA;
406 break; 406 break;
407 case EEPROM_CID_QMI: 407 case EEPROM_CID_QMI:
408 rtlhal->oem_id = RT_CID_819x_QMI; 408 rtlhal->oem_id = RT_CID_819X_QMI;
409 break; 409 break;
410 case EEPROM_CID_WHQL: 410 case EEPROM_CID_WHQL:
411 default: 411 default:
@@ -423,14 +423,14 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
423 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 423 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
424 424
425 switch (rtlhal->oem_id) { 425 switch (rtlhal->oem_id) {
426 case RT_CID_819x_HP: 426 case RT_CID_819X_HP:
427 usb_priv->ledctl.led_opendrain = true; 427 usb_priv->ledctl.led_opendrain = true;
428 break; 428 break;
429 case RT_CID_819x_Lenovo: 429 case RT_CID_819X_LENOVO:
430 case RT_CID_DEFAULT: 430 case RT_CID_DEFAULT:
431 case RT_CID_TOSHIBA: 431 case RT_CID_TOSHIBA:
432 case RT_CID_CCX: 432 case RT_CID_CCX:
433 case RT_CID_819x_Acer: 433 case RT_CID_819X_ACER:
434 case RT_CID_WHQL: 434 case RT_CID_WHQL:
435 default: 435 default:
436 break; 436 break;
@@ -985,6 +985,17 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
985 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 985 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
986 int err = 0; 986 int err = 0;
987 static bool iqk_initialized; 987 static bool iqk_initialized;
988 unsigned long flags;
989
990 /* As this function can take a very long time (up to 350 ms)
991 * and can be called with irqs disabled, reenable the irqs
992 * to let the other devices continue being serviced.
993 *
994 * It is safe doing so since our own interrupts will only be enabled
995 * in a subsequent step.
996 */
997 local_save_flags(flags);
998 local_irq_enable();
988 999
989 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; 1000 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
990 err = _rtl92cu_init_mac(hw); 1001 err = _rtl92cu_init_mac(hw);
@@ -997,7 +1008,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
997 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 1008 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
998 "Failed to download FW. Init HW without FW now..\n"); 1009 "Failed to download FW. Init HW without FW now..\n");
999 err = 1; 1010 err = 1;
1000 return err; 1011 goto exit;
1001 } 1012 }
1002 rtlhal->last_hmeboxnum = 0; /* h2c */ 1013 rtlhal->last_hmeboxnum = 0; /* h2c */
1003 _rtl92cu_phy_param_tab_init(hw); 1014 _rtl92cu_phy_param_tab_init(hw);
@@ -1034,6 +1045,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1034 _InitPABias(hw); 1045 _InitPABias(hw);
1035 _update_mac_setting(hw); 1046 _update_mac_setting(hw);
1036 rtl92c_dm_init(hw); 1047 rtl92c_dm_init(hw);
1048exit:
1049 local_irq_restore(flags);
1037 return err; 1050 return err;
1038} 1051}
1039 1052
@@ -1379,11 +1392,13 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1379{ 1392{
1380 struct rtl_priv *rtlpriv = rtl_priv(hw); 1393 struct rtl_priv *rtlpriv = rtl_priv(hw);
1381 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 1394 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1382 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); 1395 u32 reg_rcr;
1383 1396
1384 if (rtlpriv->psc.rfpwr_state != ERFON) 1397 if (rtlpriv->psc.rfpwr_state != ERFON)
1385 return; 1398 return;
1386 1399
1400 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1401
1387 if (check_bssid) { 1402 if (check_bssid) {
1388 u8 tmp; 1403 u8 tmp;
1389 if (IS_NORMAL_CHIP(rtlhal->version)) { 1404 if (IS_NORMAL_CHIP(rtlhal->version)) {
@@ -1795,7 +1810,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1795 e_aci); 1810 e_aci);
1796 break; 1811 break;
1797 } 1812 }
1798 if (rtlusb->acm_method != eAcmWay2_SW) 1813 if (rtlusb->acm_method != EACMWAY2_SW)
1799 rtlpriv->cfg->ops->set_hw_reg(hw, 1814 rtlpriv->cfg->ops->set_hw_reg(hw,
1800 HW_VAR_ACM_CTRL, &e_aci); 1815 HW_VAR_ACM_CTRL, &e_aci);
1801 break; 1816 break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 0c09240eadcc..9831ff1128ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../ps.h" 32#include "../ps.h"
33#include "../core.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -188,18 +189,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
188 } 189 }
189 if (configtype == BASEBAND_CONFIG_PHY_REG) { 190 if (configtype == BASEBAND_CONFIG_PHY_REG) {
190 for (i = 0; i < phy_reg_arraylen; i = i + 2) { 191 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
191 if (phy_regarray_table[i] == 0xfe) 192 rtl_addr_delay(phy_regarray_table[i]);
192 mdelay(50);
193 else if (phy_regarray_table[i] == 0xfd)
194 mdelay(5);
195 else if (phy_regarray_table[i] == 0xfc)
196 mdelay(1);
197 else if (phy_regarray_table[i] == 0xfb)
198 udelay(50);
199 else if (phy_regarray_table[i] == 0xfa)
200 udelay(5);
201 else if (phy_regarray_table[i] == 0xf9)
202 udelay(1);
203 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD, 193 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
204 phy_regarray_table[i + 1]); 194 phy_regarray_table[i + 1]);
205 udelay(1); 195 udelay(1);
@@ -236,18 +226,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
236 phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata; 226 phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
237 if (configtype == BASEBAND_CONFIG_PHY_REG) { 227 if (configtype == BASEBAND_CONFIG_PHY_REG) {
238 for (i = 0; i < phy_regarray_pg_len; i = i + 3) { 228 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
239 if (phy_regarray_table_pg[i] == 0xfe) 229 rtl_addr_delay(phy_regarray_table_pg[i]);
240 mdelay(50);
241 else if (phy_regarray_table_pg[i] == 0xfd)
242 mdelay(5);
243 else if (phy_regarray_table_pg[i] == 0xfc)
244 mdelay(1);
245 else if (phy_regarray_table_pg[i] == 0xfb)
246 udelay(50);
247 else if (phy_regarray_table_pg[i] == 0xfa)
248 udelay(5);
249 else if (phy_regarray_table_pg[i] == 0xf9)
250 udelay(1);
251 _rtl92c_store_pwrIndex_diffrate_offset(hw, 230 _rtl92c_store_pwrIndex_diffrate_offset(hw,
252 phy_regarray_table_pg[i], 231 phy_regarray_table_pg[i],
253 phy_regarray_table_pg[i + 1], 232 phy_regarray_table_pg[i + 1],
@@ -294,46 +273,16 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
294 switch (rfpath) { 273 switch (rfpath) {
295 case RF90_PATH_A: 274 case RF90_PATH_A:
296 for (i = 0; i < radioa_arraylen; i = i + 2) { 275 for (i = 0; i < radioa_arraylen; i = i + 2) {
297 if (radioa_array_table[i] == 0xfe) 276 rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
298 mdelay(50); 277 RFREG_OFFSET_MASK,
299 else if (radioa_array_table[i] == 0xfd) 278 radioa_array_table[i + 1]);
300 mdelay(5);
301 else if (radioa_array_table[i] == 0xfc)
302 mdelay(1);
303 else if (radioa_array_table[i] == 0xfb)
304 udelay(50);
305 else if (radioa_array_table[i] == 0xfa)
306 udelay(5);
307 else if (radioa_array_table[i] == 0xf9)
308 udelay(1);
309 else {
310 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
311 RFREG_OFFSET_MASK,
312 radioa_array_table[i + 1]);
313 udelay(1);
314 }
315 } 279 }
316 break; 280 break;
317 case RF90_PATH_B: 281 case RF90_PATH_B:
318 for (i = 0; i < radiob_arraylen; i = i + 2) { 282 for (i = 0; i < radiob_arraylen; i = i + 2) {
319 if (radiob_array_table[i] == 0xfe) { 283 rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
320 mdelay(50); 284 RFREG_OFFSET_MASK,
321 } else if (radiob_array_table[i] == 0xfd) 285 radiob_array_table[i + 1]);
322 mdelay(5);
323 else if (radiob_array_table[i] == 0xfc)
324 mdelay(1);
325 else if (radiob_array_table[i] == 0xfb)
326 udelay(50);
327 else if (radiob_array_table[i] == 0xfa)
328 udelay(5);
329 else if (radiob_array_table[i] == 0xf9)
330 udelay(1);
331 else {
332 rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
333 RFREG_OFFSET_MASK,
334 radiob_array_table[i + 1]);
335 udelay(1);
336 }
337 } 286 }
338 break; 287 break;
339 case RF90_PATH_C: 288 case RF90_PATH_C:
@@ -344,6 +293,8 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
344 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 293 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
345 "switch case not processed\n"); 294 "switch case not processed\n");
346 break; 295 break;
296 default:
297 break;
347 } 298 }
348 return true; 299 return true;
349} 300}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 1bc21ccfa71b..035e0dc3922c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -495,7 +495,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
495 495
496void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 496void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
497 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 497 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
498 struct ieee80211_tx_info *info, 498 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
499 struct ieee80211_sta *sta, 499 struct ieee80211_sta *sta,
500 struct sk_buff *skb, 500 struct sk_buff *skb,
501 u8 queue_index, 501 u8 queue_index,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 725c53accc58..fd8051dcd98a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,7 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
420 struct sk_buff_head *); 420 struct sk_buff_head *);
421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
422 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 422 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
423 struct ieee80211_tx_info *info, 423 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
424 struct ieee80211_sta *sta, 424 struct ieee80211_sta *sta,
425 struct sk_buff *skb, 425 struct sk_buff *skb,
426 u8 queue_index, 426 u8 queue_index,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 7908e1c85819..304c443b89b2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -194,15 +194,15 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
194 rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */ 194 rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
195 rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */ 195 rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
196 196
197 ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD); 197 ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
198 falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff); 198 falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
199 falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16); 199 falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
200 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD); 200 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
201 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); 201 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
202 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD); 202 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
203 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); 203 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
204 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); 204 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
205 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD); 205 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
206 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); 206 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
207 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + 207 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
208 falsealm_cnt->cnt_rate_illegal + 208 falsealm_cnt->cnt_rate_illegal +
@@ -214,9 +214,9 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
214 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { 214 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
215 /* hold cck counter */ 215 /* hold cck counter */
216 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); 216 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
217 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0); 217 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
218 falsealm_cnt->cnt_cck_fail = ret_value; 218 falsealm_cnt->cnt_cck_fail = ret_value;
219 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3); 219 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
220 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; 220 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
221 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 221 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
222 } else { 222 } else {
@@ -331,11 +331,11 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
331 if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) { 331 if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
332 if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 332 if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
333 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); 333 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
334 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83); 334 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
335 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 335 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
336 } else { 336 } else {
337 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); 337 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
338 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd); 338 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
339 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 339 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
340 } 340 }
341 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; 341 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
@@ -722,7 +722,7 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
722 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 722 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
723 "===> Rx Gain %x\n", u4tmp); 723 "===> Rx Gain %x\n", u4tmp);
724 for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++) 724 for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
725 rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK, 725 rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
726 (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp); 726 (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
727} 727}
728 728
@@ -737,7 +737,7 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
737 /* Query CCK default setting From 0xa24 */ 737 /* Query CCK default setting From 0xa24 */
738 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); 738 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
739 temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, 739 temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
740 BMASKDWORD) & BMASKCCK; 740 MASKDWORD) & MASKCCK;
741 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 741 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
742 for (i = 0; i < CCK_TABLE_LENGTH; i++) { 742 for (i = 0; i < CCK_TABLE_LENGTH; i++) {
743 if (rtlpriv->dm.cck_inch14) { 743 if (rtlpriv->dm.cck_inch14) {
@@ -896,9 +896,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
896 rf = 1; 896 rf = 1;
897 if (thermalvalue) { 897 if (thermalvalue) {
898 ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 898 ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
899 BMASKDWORD) & BMASKOFDM_D; 899 MASKDWORD) & MASKOFDM_D;
900 for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { 900 for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
901 if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) { 901 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
902 ofdm_index_old[0] = (u8) i; 902 ofdm_index_old[0] = (u8) i;
903 903
904 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 904 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
@@ -910,10 +910,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
910 } 910 }
911 if (is2t) { 911 if (is2t) {
912 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 912 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
913 BMASKDWORD) & BMASKOFDM_D; 913 MASKDWORD) & MASKOFDM_D;
914 for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { 914 for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
915 if (ele_d == 915 if (ele_d ==
916 (ofdmswing_table[i] & BMASKOFDM_D)) { 916 (ofdmswing_table[i] & MASKOFDM_D)) {
917 ofdm_index_old[1] = (u8) i; 917 ofdm_index_old[1] = (u8) i;
918 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, 918 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
919 DBG_LOUD, 919 DBG_LOUD,
@@ -1091,10 +1091,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1091 value32 = (ele_d << 22) | ((ele_c & 0x3F) << 1091 value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
1092 16) | ele_a; 1092 16) | ele_a;
1093 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 1093 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
1094 BMASKDWORD, value32); 1094 MASKDWORD, value32);
1095 1095
1096 value32 = (ele_c & 0x000003C0) >> 6; 1096 value32 = (ele_c & 0x000003C0) >> 6;
1097 rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS, 1097 rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
1098 value32); 1098 value32);
1099 1099
1100 value32 = ((val_x * ele_d) >> 7) & 0x01; 1100 value32 = ((val_x * ele_d) >> 7) & 0x01;
@@ -1103,10 +1103,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1103 1103
1104 } else { 1104 } else {
1105 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 1105 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
1106 BMASKDWORD, 1106 MASKDWORD,
1107 ofdmswing_table 1107 ofdmswing_table
1108 [(u8)ofdm_index[0]]); 1108 [(u8)ofdm_index[0]]);
1109 rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS, 1109 rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
1110 0x00); 1110 0x00);
1111 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, 1111 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1112 BIT(24), 0x00); 1112 BIT(24), 0x00);
@@ -1204,21 +1204,21 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1204 ele_a; 1204 ele_a;
1205 rtl_set_bbreg(hw, 1205 rtl_set_bbreg(hw,
1206 ROFDM0_XBTxIQIMBALANCE, 1206 ROFDM0_XBTxIQIMBALANCE,
1207 BMASKDWORD, value32); 1207 MASKDWORD, value32);
1208 value32 = (ele_c & 0x000003C0) >> 6; 1208 value32 = (ele_c & 0x000003C0) >> 6;
1209 rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 1209 rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
1210 BMASKH4BITS, value32); 1210 MASKH4BITS, value32);
1211 value32 = ((val_x * ele_d) >> 7) & 0x01; 1211 value32 = ((val_x * ele_d) >> 7) & 0x01;
1212 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, 1212 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1213 BIT(28), value32); 1213 BIT(28), value32);
1214 } else { 1214 } else {
1215 rtl_set_bbreg(hw, 1215 rtl_set_bbreg(hw,
1216 ROFDM0_XBTxIQIMBALANCE, 1216 ROFDM0_XBTxIQIMBALANCE,
1217 BMASKDWORD, 1217 MASKDWORD,
1218 ofdmswing_table 1218 ofdmswing_table
1219 [(u8) ofdm_index[1]]); 1219 [(u8) ofdm_index[1]]);
1220 rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 1220 rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
1221 BMASKH4BITS, 0x00); 1221 MASKH4BITS, 0x00);
1222 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, 1222 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1223 BIT(28), 0x00); 1223 BIT(28), 0x00);
1224 } 1224 }
@@ -1229,10 +1229,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1229 } 1229 }
1230 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 1230 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1231 "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n", 1231 "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
1232 rtl_get_bbreg(hw, 0xc80, BMASKDWORD), 1232 rtl_get_bbreg(hw, 0xc80, MASKDWORD),
1233 rtl_get_bbreg(hw, 0xc94, BMASKDWORD), 1233 rtl_get_bbreg(hw, 0xc94, MASKDWORD),
1234 rtl_get_rfreg(hw, RF90_PATH_A, 0x24, 1234 rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
1235 BRFREGOFFSETMASK)); 1235 RFREG_OFFSET_MASK));
1236 } 1236 }
1237 if ((delta_iqk > rtlefuse->delta_iqk) && 1237 if ((delta_iqk > rtlefuse->delta_iqk) &&
1238 (rtlefuse->delta_iqk != 0)) { 1238 (rtlefuse->delta_iqk != 0)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index c4a7db9135d6..2b08671004a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -318,7 +318,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
318 case HW_VAR_AC_PARAM: { 318 case HW_VAR_AC_PARAM: {
319 u8 e_aci = *val; 319 u8 e_aci = *val;
320 rtl92d_dm_init_edca_turbo(hw); 320 rtl92d_dm_init_edca_turbo(hw);
321 if (rtlpci->acm_method != eAcmWay2_SW) 321 if (rtlpci->acm_method != EACMWAY2_SW)
322 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, 322 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
323 &e_aci); 323 &e_aci);
324 break; 324 break;
@@ -985,9 +985,9 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
985 /* set default value after initialize RF, */ 985 /* set default value after initialize RF, */
986 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0); 986 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
987 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, 987 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
988 RF_CHNLBW, BRFREGOFFSETMASK); 988 RF_CHNLBW, RFREG_OFFSET_MASK);
989 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1, 989 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
990 RF_CHNLBW, BRFREGOFFSETMASK); 990 RF_CHNLBW, RFREG_OFFSET_MASK);
991 991
992 /*---- Set CCK and OFDM Block "ON"----*/ 992 /*---- Set CCK and OFDM Block "ON"----*/
993 if (rtlhal->current_bandtype == BAND_ON_2_4G) 993 if (rtlhal->current_bandtype == BAND_ON_2_4G)
@@ -1035,7 +1035,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
1035 1035
1036 tmp_rega = rtl_get_rfreg(hw, 1036 tmp_rega = rtl_get_rfreg(hw,
1037 (enum radio_path)RF90_PATH_A, 1037 (enum radio_path)RF90_PATH_A,
1038 0x2a, BMASKDWORD); 1038 0x2a, MASKDWORD);
1039 1039
1040 if (((tmp_rega & BIT(11)) == BIT(11))) 1040 if (((tmp_rega & BIT(11)) == BIT(11)))
1041 break; 1041 break;
@@ -1138,11 +1138,13 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
1138void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1138void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1139{ 1139{
1140 struct rtl_priv *rtlpriv = rtl_priv(hw); 1140 struct rtl_priv *rtlpriv = rtl_priv(hw);
1141 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1141 u32 reg_rcr;
1142 u32 reg_rcr = rtlpci->receive_config;
1143 1142
1144 if (rtlpriv->psc.rfpwr_state != ERFON) 1143 if (rtlpriv->psc.rfpwr_state != ERFON)
1145 return; 1144 return;
1145
1146 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1147
1146 if (check_bssid) { 1148 if (check_bssid) {
1147 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1149 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1148 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr)); 1150 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
@@ -1332,13 +1334,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
1332 /* c. ========RF OFF sequence========== */ 1334 /* c. ========RF OFF sequence========== */
1333 /* 0x88c[23:20] = 0xf. */ 1335 /* 0x88c[23:20] = 0xf. */
1334 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf); 1336 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
1335 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00); 1337 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
1336 1338
1337 /* APSD_CTRL 0x600[7:0] = 0x40 */ 1339 /* APSD_CTRL 0x600[7:0] = 0x40 */
1338 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); 1340 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
1339 1341
1340 /* Close antenna 0,0xc04,0xd04 */ 1342 /* Close antenna 0,0xc04,0xd04 */
1341 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0); 1343 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0);
1342 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0); 1344 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0);
1343 1345
1344 /* SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB state machine */ 1346 /* SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB state machine */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 13196cc4b1d3..3d1f0dd4e52d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../ps.h" 32#include "../ps.h"
33#include "../core.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -242,7 +243,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
242 else if (rtlhal->during_mac0init_radiob) 243 else if (rtlhal->during_mac0init_radiob)
243 /* mac0 use phy1 write radio_b. */ 244 /* mac0 use phy1 write radio_b. */
244 dbi_direct = BIT(3) | BIT(2); 245 dbi_direct = BIT(3) | BIT(2);
245 if (bitmask != BMASKDWORD) { 246 if (bitmask != MASKDWORD) {
246 if (rtlhal->during_mac1init_radioa || 247 if (rtlhal->during_mac1init_radioa ||
247 rtlhal->during_mac0init_radiob) 248 rtlhal->during_mac0init_radiob)
248 originalvalue = rtl92de_read_dword_dbi(hw, 249 originalvalue = rtl92de_read_dword_dbi(hw,
@@ -275,20 +276,20 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
275 u32 retvalue; 276 u32 retvalue;
276 277
277 newoffset = offset; 278 newoffset = offset;
278 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD); 279 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
279 if (rfpath == RF90_PATH_A) 280 if (rfpath == RF90_PATH_A)
280 tmplong2 = tmplong; 281 tmplong2 = tmplong;
281 else 282 else
282 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD); 283 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
283 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | 284 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
284 (newoffset << 23) | BLSSIREADEDGE; 285 (newoffset << 23) | BLSSIREADEDGE;
285 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD, 286 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
286 tmplong & (~BLSSIREADEDGE)); 287 tmplong & (~BLSSIREADEDGE));
287 udelay(10); 288 udelay(10);
288 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD, tmplong2); 289 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
289 udelay(50); 290 udelay(50);
290 udelay(50); 291 udelay(50);
291 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD, 292 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
292 tmplong | BLSSIREADEDGE); 293 tmplong | BLSSIREADEDGE);
293 udelay(10); 294 udelay(10);
294 if (rfpath == RF90_PATH_A) 295 if (rfpath == RF90_PATH_A)
@@ -321,7 +322,7 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
321 newoffset = offset; 322 newoffset = offset;
322 /* T65 RF */ 323 /* T65 RF */
323 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; 324 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
324 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, BMASKDWORD, data_and_addr); 325 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
325 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n", 326 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
326 rfpath, pphyreg->rf3wire_offset, data_and_addr); 327 rfpath, pphyreg->rf3wire_offset, data_and_addr);
327} 328}
@@ -362,7 +363,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
362 return; 363 return;
363 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); 364 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
364 if (rtlphy->rf_mode != RF_OP_BY_FW) { 365 if (rtlphy->rf_mode != RF_OP_BY_FW) {
365 if (bitmask != BRFREGOFFSETMASK) { 366 if (bitmask != RFREG_OFFSET_MASK) {
366 original_value = _rtl92d_phy_rf_serial_read(hw, 367 original_value = _rtl92d_phy_rf_serial_read(hw,
367 rfpath, regaddr); 368 rfpath, regaddr);
368 bitshift = _rtl92d_phy_calculate_bit_shift(bitmask); 369 bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
@@ -567,19 +568,8 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
567 " ===> phy:Rtl819XPHY_REG_Array_PG\n"); 568 " ===> phy:Rtl819XPHY_REG_Array_PG\n");
568 if (configtype == BASEBAND_CONFIG_PHY_REG) { 569 if (configtype == BASEBAND_CONFIG_PHY_REG) {
569 for (i = 0; i < phy_reg_arraylen; i = i + 2) { 570 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
570 if (phy_regarray_table[i] == 0xfe) 571 rtl_addr_delay(phy_regarray_table[i]);
571 mdelay(50); 572 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
572 else if (phy_regarray_table[i] == 0xfd)
573 mdelay(5);
574 else if (phy_regarray_table[i] == 0xfc)
575 mdelay(1);
576 else if (phy_regarray_table[i] == 0xfb)
577 udelay(50);
578 else if (phy_regarray_table[i] == 0xfa)
579 udelay(5);
580 else if (phy_regarray_table[i] == 0xf9)
581 udelay(1);
582 rtl_set_bbreg(hw, phy_regarray_table[i], BMASKDWORD,
583 phy_regarray_table[i + 1]); 573 phy_regarray_table[i + 1]);
584 udelay(1); 574 udelay(1);
585 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 575 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
@@ -591,7 +581,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
591 if (rtlhal->interfaceindex == 0) { 581 if (rtlhal->interfaceindex == 0) {
592 for (i = 0; i < agctab_arraylen; i = i + 2) { 582 for (i = 0; i < agctab_arraylen; i = i + 2) {
593 rtl_set_bbreg(hw, agctab_array_table[i], 583 rtl_set_bbreg(hw, agctab_array_table[i],
594 BMASKDWORD, 584 MASKDWORD,
595 agctab_array_table[i + 1]); 585 agctab_array_table[i + 1]);
596 /* Add 1us delay between BB/RF register 586 /* Add 1us delay between BB/RF register
597 * setting. */ 587 * setting. */
@@ -607,7 +597,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
607 if (rtlhal->current_bandtype == BAND_ON_2_4G) { 597 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
608 for (i = 0; i < agctab_arraylen; i = i + 2) { 598 for (i = 0; i < agctab_arraylen; i = i + 2) {
609 rtl_set_bbreg(hw, agctab_array_table[i], 599 rtl_set_bbreg(hw, agctab_array_table[i],
610 BMASKDWORD, 600 MASKDWORD,
611 agctab_array_table[i + 1]); 601 agctab_array_table[i + 1]);
612 /* Add 1us delay between BB/RF register 602 /* Add 1us delay between BB/RF register
613 * setting. */ 603 * setting. */
@@ -623,7 +613,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
623 for (i = 0; i < agctab_5garraylen; i = i + 2) { 613 for (i = 0; i < agctab_5garraylen; i = i + 2) {
624 rtl_set_bbreg(hw, 614 rtl_set_bbreg(hw,
625 agctab_5garray_table[i], 615 agctab_5garray_table[i],
626 BMASKDWORD, 616 MASKDWORD,
627 agctab_5garray_table[i + 1]); 617 agctab_5garray_table[i + 1]);
628 /* Add 1us delay between BB/RF registeri 618 /* Add 1us delay between BB/RF registeri
629 * setting. */ 619 * setting. */
@@ -705,18 +695,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
705 phy_regarray_table_pg = rtl8192de_phy_reg_array_pg; 695 phy_regarray_table_pg = rtl8192de_phy_reg_array_pg;
706 if (configtype == BASEBAND_CONFIG_PHY_REG) { 696 if (configtype == BASEBAND_CONFIG_PHY_REG) {
707 for (i = 0; i < phy_regarray_pg_len; i = i + 3) { 697 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
708 if (phy_regarray_table_pg[i] == 0xfe) 698 rtl_addr_delay(phy_regarray_table_pg[i]);
709 mdelay(50);
710 else if (phy_regarray_table_pg[i] == 0xfd)
711 mdelay(5);
712 else if (phy_regarray_table_pg[i] == 0xfc)
713 mdelay(1);
714 else if (phy_regarray_table_pg[i] == 0xfb)
715 udelay(50);
716 else if (phy_regarray_table_pg[i] == 0xfa)
717 udelay(5);
718 else if (phy_regarray_table_pg[i] == 0xf9)
719 udelay(1);
720 _rtl92d_store_pwrindex_diffrate_offset(hw, 699 _rtl92d_store_pwrindex_diffrate_offset(hw,
721 phy_regarray_table_pg[i], 700 phy_regarray_table_pg[i],
722 phy_regarray_table_pg[i + 1], 701 phy_regarray_table_pg[i + 1],
@@ -843,54 +822,16 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
843 switch (rfpath) { 822 switch (rfpath) {
844 case RF90_PATH_A: 823 case RF90_PATH_A:
845 for (i = 0; i < radioa_arraylen; i = i + 2) { 824 for (i = 0; i < radioa_arraylen; i = i + 2) {
846 if (radioa_array_table[i] == 0xfe) { 825 rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
847 mdelay(50); 826 RFREG_OFFSET_MASK,
848 } else if (radioa_array_table[i] == 0xfd) { 827 radioa_array_table[i + 1]);
849 /* delay_ms(5); */
850 mdelay(5);
851 } else if (radioa_array_table[i] == 0xfc) {
852 /* delay_ms(1); */
853 mdelay(1);
854 } else if (radioa_array_table[i] == 0xfb) {
855 udelay(50);
856 } else if (radioa_array_table[i] == 0xfa) {
857 udelay(5);
858 } else if (radioa_array_table[i] == 0xf9) {
859 udelay(1);
860 } else {
861 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
862 BRFREGOFFSETMASK,
863 radioa_array_table[i + 1]);
864 /* Add 1us delay between BB/RF register set. */
865 udelay(1);
866 }
867 } 828 }
868 break; 829 break;
869 case RF90_PATH_B: 830 case RF90_PATH_B:
870 for (i = 0; i < radiob_arraylen; i = i + 2) { 831 for (i = 0; i < radiob_arraylen; i = i + 2) {
871 if (radiob_array_table[i] == 0xfe) { 832 rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
872 /* Delay specific ms. Only RF configuration 833 RFREG_OFFSET_MASK,
873 * requires delay. */ 834 radiob_array_table[i + 1]);
874 mdelay(50);
875 } else if (radiob_array_table[i] == 0xfd) {
876 /* delay_ms(5); */
877 mdelay(5);
878 } else if (radiob_array_table[i] == 0xfc) {
879 /* delay_ms(1); */
880 mdelay(1);
881 } else if (radiob_array_table[i] == 0xfb) {
882 udelay(50);
883 } else if (radiob_array_table[i] == 0xfa) {
884 udelay(5);
885 } else if (radiob_array_table[i] == 0xf9) {
886 udelay(1);
887 } else {
888 rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
889 BRFREGOFFSETMASK,
890 radiob_array_table[i + 1]);
891 /* Add 1us delay between BB/RF register set. */
892 udelay(1);
893 }
894 } 835 }
895 break; 836 break;
896 case RF90_PATH_C: 837 case RF90_PATH_C:
@@ -911,13 +852,13 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
911 struct rtl_phy *rtlphy = &(rtlpriv->phy); 852 struct rtl_phy *rtlphy = &(rtlpriv->phy);
912 853
913 rtlphy->default_initialgain[0] = 854 rtlphy->default_initialgain[0] =
914 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, BMASKBYTE0); 855 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
915 rtlphy->default_initialgain[1] = 856 rtlphy->default_initialgain[1] =
916 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, BMASKBYTE0); 857 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
917 rtlphy->default_initialgain[2] = 858 rtlphy->default_initialgain[2] =
918 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, BMASKBYTE0); 859 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
919 rtlphy->default_initialgain[3] = 860 rtlphy->default_initialgain[3] =
920 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, BMASKBYTE0); 861 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
921 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 862 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
922 "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", 863 "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
923 rtlphy->default_initialgain[0], 864 rtlphy->default_initialgain[0],
@@ -925,9 +866,9 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
925 rtlphy->default_initialgain[2], 866 rtlphy->default_initialgain[2],
926 rtlphy->default_initialgain[3]); 867 rtlphy->default_initialgain[3]);
927 rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, 868 rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
928 BMASKBYTE0); 869 MASKBYTE0);
929 rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, 870 rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
930 BMASKDWORD); 871 MASKDWORD);
931 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 872 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
932 "Default framesync (0x%x) = 0x%x\n", 873 "Default framesync (0x%x) = 0x%x\n",
933 ROFDM0_RXDETECTOR3, rtlphy->framesync); 874 ROFDM0_RXDETECTOR3, rtlphy->framesync);
@@ -1106,7 +1047,7 @@ static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
1106{ 1047{
1107 rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0); 1048 rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0);
1108 rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0); 1049 rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0);
1109 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x00); 1050 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
1110 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0); 1051 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
1111} 1052}
1112 1053
@@ -1168,7 +1109,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
1168{ 1109{
1169 struct rtl_priv *rtlpriv = rtl_priv(hw); 1110 struct rtl_priv *rtlpriv = rtl_priv(hw);
1170 u32 imr_num = MAX_RF_IMR_INDEX; 1111 u32 imr_num = MAX_RF_IMR_INDEX;
1171 u32 rfmask = BRFREGOFFSETMASK; 1112 u32 rfmask = RFREG_OFFSET_MASK;
1172 u8 group, i; 1113 u8 group, i;
1173 unsigned long flag = 0; 1114 unsigned long flag = 0;
1174 1115
@@ -1211,7 +1152,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
1211 for (i = 0; i < imr_num; i++) { 1152 for (i = 0; i < imr_num; i++) {
1212 rtl_set_rfreg(hw, (enum radio_path)rfpath, 1153 rtl_set_rfreg(hw, (enum radio_path)rfpath,
1213 rf_reg_for_5g_swchnl_normal[i], 1154 rf_reg_for_5g_swchnl_normal[i],
1214 BRFREGOFFSETMASK, 1155 RFREG_OFFSET_MASK,
1215 rf_imr_param_normal[0][0][i]); 1156 rf_imr_param_normal[0][0][i]);
1216 } 1157 }
1217 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 1158 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1329,7 +1270,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
1329 if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) { 1270 if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
1330 rtl_set_rfreg(hw, (enum radio_path)path, 1271 rtl_set_rfreg(hw, (enum radio_path)path,
1331 rf_reg_for_c_cut_5g[i], 1272 rf_reg_for_c_cut_5g[i],
1332 BRFREGOFFSETMASK, 0xE439D); 1273 RFREG_OFFSET_MASK, 0xE439D);
1333 } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) { 1274 } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
1334 u4tmp2 = (rf_reg_pram_c_5g[index][i] & 1275 u4tmp2 = (rf_reg_pram_c_5g[index][i] &
1335 0x7FF) | (u4tmp << 11); 1276 0x7FF) | (u4tmp << 11);
@@ -1337,11 +1278,11 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
1337 u4tmp2 &= ~(BIT(7) | BIT(6)); 1278 u4tmp2 &= ~(BIT(7) | BIT(6));
1338 rtl_set_rfreg(hw, (enum radio_path)path, 1279 rtl_set_rfreg(hw, (enum radio_path)path,
1339 rf_reg_for_c_cut_5g[i], 1280 rf_reg_for_c_cut_5g[i],
1340 BRFREGOFFSETMASK, u4tmp2); 1281 RFREG_OFFSET_MASK, u4tmp2);
1341 } else { 1282 } else {
1342 rtl_set_rfreg(hw, (enum radio_path)path, 1283 rtl_set_rfreg(hw, (enum radio_path)path,
1343 rf_reg_for_c_cut_5g[i], 1284 rf_reg_for_c_cut_5g[i],
1344 BRFREGOFFSETMASK, 1285 RFREG_OFFSET_MASK,
1345 rf_reg_pram_c_5g[index][i]); 1286 rf_reg_pram_c_5g[index][i]);
1346 } 1287 }
1347 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, 1288 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1351,7 +1292,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
1351 path, index, 1292 path, index,
1352 rtl_get_rfreg(hw, (enum radio_path)path, 1293 rtl_get_rfreg(hw, (enum radio_path)path,
1353 rf_reg_for_c_cut_5g[i], 1294 rf_reg_for_c_cut_5g[i],
1354 BRFREGOFFSETMASK)); 1295 RFREG_OFFSET_MASK));
1355 } 1296 }
1356 if (need_pwr_down) 1297 if (need_pwr_down)
1357 _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); 1298 _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1381,7 +1322,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
1381 i++) { 1322 i++) {
1382 rtl_set_rfreg(hw, rfpath, 1323 rtl_set_rfreg(hw, rfpath,
1383 rf_for_c_cut_5g_internal_pa[i], 1324 rf_for_c_cut_5g_internal_pa[i],
1384 BRFREGOFFSETMASK, 1325 RFREG_OFFSET_MASK,
1385 rf_pram_c_5g_int_pa[index][i]); 1326 rf_pram_c_5g_int_pa[index][i]);
1386 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, 1327 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
1387 "offset 0x%x value 0x%x path %d index %d\n", 1328 "offset 0x%x value 0x%x path %d index %d\n",
@@ -1422,13 +1363,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
1422 if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7) 1363 if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
1423 rtl_set_rfreg(hw, (enum radio_path)path, 1364 rtl_set_rfreg(hw, (enum radio_path)path,
1424 rf_reg_for_c_cut_2g[i], 1365 rf_reg_for_c_cut_2g[i],
1425 BRFREGOFFSETMASK, 1366 RFREG_OFFSET_MASK,
1426 (rf_reg_param_for_c_cut_2g[index][i] | 1367 (rf_reg_param_for_c_cut_2g[index][i] |
1427 BIT(17))); 1368 BIT(17)));
1428 else 1369 else
1429 rtl_set_rfreg(hw, (enum radio_path)path, 1370 rtl_set_rfreg(hw, (enum radio_path)path,
1430 rf_reg_for_c_cut_2g[i], 1371 rf_reg_for_c_cut_2g[i],
1431 BRFREGOFFSETMASK, 1372 RFREG_OFFSET_MASK,
1432 rf_reg_param_for_c_cut_2g 1373 rf_reg_param_for_c_cut_2g
1433 [index][i]); 1374 [index][i]);
1434 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, 1375 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1438,14 +1379,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
1438 rf_reg_mask_for_c_cut_2g[i], path, index, 1379 rf_reg_mask_for_c_cut_2g[i], path, index,
1439 rtl_get_rfreg(hw, (enum radio_path)path, 1380 rtl_get_rfreg(hw, (enum radio_path)path,
1440 rf_reg_for_c_cut_2g[i], 1381 rf_reg_for_c_cut_2g[i],
1441 BRFREGOFFSETMASK)); 1382 RFREG_OFFSET_MASK));
1442 } 1383 }
1443 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1384 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1444 "cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", 1385 "cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
1445 rf_syn_g4_for_c_cut_2g | (u4tmp << 11)); 1386 rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
1446 1387
1447 rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4, 1388 rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
1448 BRFREGOFFSETMASK, 1389 RFREG_OFFSET_MASK,
1449 rf_syn_g4_for_c_cut_2g | (u4tmp << 11)); 1390 rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
1450 if (need_pwr_down) 1391 if (need_pwr_down)
1451 _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); 1392 _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1493,41 +1434,41 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
1493 /* path-A IQK setting */ 1434 /* path-A IQK setting */
1494 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n"); 1435 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
1495 if (rtlhal->interfaceindex == 0) { 1436 if (rtlhal->interfaceindex == 0) {
1496 rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c1f); 1437 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
1497 rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c1f); 1438 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
1498 } else { 1439 } else {
1499 rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c22); 1440 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c22);
1500 rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c22); 1441 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c22);
1501 } 1442 }
1502 rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140102); 1443 rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
1503 rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x28160206); 1444 rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160206);
1504 /* path-B IQK setting */ 1445 /* path-B IQK setting */
1505 if (configpathb) { 1446 if (configpathb) {
1506 rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x10008c22); 1447 rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
1507 rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x10008c22); 1448 rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
1508 rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140102); 1449 rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
1509 rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x28160206); 1450 rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160206);
1510 } 1451 }
1511 /* LO calibration setting */ 1452 /* LO calibration setting */
1512 RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n"); 1453 RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
1513 rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911); 1454 rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
1514 /* One shot, path A LOK & IQK */ 1455 /* One shot, path A LOK & IQK */
1515 RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n"); 1456 RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
1516 rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000); 1457 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
1517 rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000); 1458 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
1518 /* delay x ms */ 1459 /* delay x ms */
1519 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1460 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1520 "Delay %d ms for One shot, path A LOK & IQK\n", 1461 "Delay %d ms for One shot, path A LOK & IQK\n",
1521 IQK_DELAY_TIME); 1462 IQK_DELAY_TIME);
1522 mdelay(IQK_DELAY_TIME); 1463 mdelay(IQK_DELAY_TIME);
1523 /* Check failed */ 1464 /* Check failed */
1524 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1465 regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1525 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); 1466 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
1526 rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD); 1467 rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
1527 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94); 1468 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
1528 rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD); 1469 rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
1529 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c); 1470 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
1530 regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD); 1471 regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
1531 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4); 1472 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
1532 if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) && 1473 if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
1533 (((rege9c & 0x03FF0000) >> 16) != 0x42)) 1474 (((rege9c & 0x03FF0000) >> 16) != 0x42))
@@ -1563,42 +1504,42 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
1563 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n"); 1504 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n");
1564 /* path-A IQK setting */ 1505 /* path-A IQK setting */
1565 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n"); 1506 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
1566 rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f); 1507 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
1567 rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f); 1508 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
1568 rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140307); 1509 rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140307);
1569 rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68160960); 1510 rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68160960);
1570 /* path-B IQK setting */ 1511 /* path-B IQK setting */
1571 if (configpathb) { 1512 if (configpathb) {
1572 rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f); 1513 rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
1573 rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f); 1514 rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
1574 rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82110000); 1515 rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82110000);
1575 rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68110000); 1516 rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68110000);
1576 } 1517 }
1577 /* LO calibration setting */ 1518 /* LO calibration setting */
1578 RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n"); 1519 RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
1579 rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911); 1520 rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
1580 /* path-A PA on */ 1521 /* path-A PA on */
1581 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x07000f60); 1522 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60);
1582 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD, 0x66e60e30); 1523 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30);
1583 for (i = 0; i < retrycount; i++) { 1524 for (i = 0; i < retrycount; i++) {
1584 /* One shot, path A LOK & IQK */ 1525 /* One shot, path A LOK & IQK */
1585 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1526 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1586 "One shot, path A LOK & IQK!\n"); 1527 "One shot, path A LOK & IQK!\n");
1587 rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000); 1528 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
1588 rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000); 1529 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
1589 /* delay x ms */ 1530 /* delay x ms */
1590 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1531 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1591 "Delay %d ms for One shot, path A LOK & IQK.\n", 1532 "Delay %d ms for One shot, path A LOK & IQK.\n",
1592 IQK_DELAY_TIME); 1533 IQK_DELAY_TIME);
1593 mdelay(IQK_DELAY_TIME * 10); 1534 mdelay(IQK_DELAY_TIME * 10);
1594 /* Check failed */ 1535 /* Check failed */
1595 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1536 regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1596 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); 1537 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
1597 rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD); 1538 rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
1598 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94); 1539 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
1599 rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD); 1540 rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
1600 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c); 1541 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
1601 regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD); 1542 regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
1602 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4); 1543 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
1603 if (!(regeac & TxOKBit) && 1544 if (!(regeac & TxOKBit) &&
1604 (((rege94 & 0x03FF0000) >> 16) != 0x142)) { 1545 (((rege94 & 0x03FF0000) >> 16) != 0x142)) {
@@ -1620,9 +1561,9 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
1620 } 1561 }
1621 } 1562 }
1622 /* path A PA off */ 1563 /* path A PA off */
1623 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 1564 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
1624 rtlphy->iqk_bb_backup[0]); 1565 rtlphy->iqk_bb_backup[0]);
1625 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD, 1566 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD,
1626 rtlphy->iqk_bb_backup[1]); 1567 rtlphy->iqk_bb_backup[1]);
1627 return result; 1568 return result;
1628} 1569}
@@ -1637,22 +1578,22 @@ static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
1637 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n"); 1578 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
1638 /* One shot, path B LOK & IQK */ 1579 /* One shot, path B LOK & IQK */
1639 RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n"); 1580 RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
1640 rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000002); 1581 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
1641 rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000000); 1582 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
1642 /* delay x ms */ 1583 /* delay x ms */
1643 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1584 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1644 "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME); 1585 "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
1645 mdelay(IQK_DELAY_TIME); 1586 mdelay(IQK_DELAY_TIME);
1646 /* Check failed */ 1587 /* Check failed */
1647 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1588 regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1648 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); 1589 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
1649 regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD); 1590 regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
1650 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4); 1591 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
1651 regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD); 1592 regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
1652 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc); 1593 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
1653 regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD); 1594 regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
1654 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4); 1595 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
1655 regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD); 1596 regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
1656 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc); 1597 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
1657 if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) && 1598 if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
1658 (((regebc & 0x03FF0000) >> 16) != 0x42)) 1599 (((regebc & 0x03FF0000) >> 16) != 0x42))
@@ -1680,31 +1621,31 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
1680 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n"); 1621 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
1681 /* path-A IQK setting */ 1622 /* path-A IQK setting */
1682 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n"); 1623 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
1683 rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f); 1624 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
1684 rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f); 1625 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
1685 rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82110000); 1626 rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82110000);
1686 rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68110000); 1627 rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68110000);
1687 1628
1688 /* path-B IQK setting */ 1629 /* path-B IQK setting */
1689 rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f); 1630 rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
1690 rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f); 1631 rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
1691 rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140307); 1632 rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140307);
1692 rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68160960); 1633 rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68160960);
1693 1634
1694 /* LO calibration setting */ 1635 /* LO calibration setting */
1695 RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n"); 1636 RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
1696 rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911); 1637 rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
1697 1638
1698 /* path-B PA on */ 1639 /* path-B PA on */
1699 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x0f600700); 1640 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
1700 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD, 0x061f0d30); 1641 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
1701 1642
1702 for (i = 0; i < retrycount; i++) { 1643 for (i = 0; i < retrycount; i++) {
1703 /* One shot, path B LOK & IQK */ 1644 /* One shot, path B LOK & IQK */
1704 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1645 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1705 "One shot, path A LOK & IQK!\n"); 1646 "One shot, path A LOK & IQK!\n");
1706 rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xfa000000); 1647 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xfa000000);
1707 rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000); 1648 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
1708 1649
1709 /* delay x ms */ 1650 /* delay x ms */
1710 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1651 RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -1712,15 +1653,15 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
1712 mdelay(IQK_DELAY_TIME * 10); 1653 mdelay(IQK_DELAY_TIME * 10);
1713 1654
1714 /* Check failed */ 1655 /* Check failed */
1715 regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD); 1656 regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1716 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); 1657 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
1717 regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD); 1658 regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
1718 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4); 1659 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
1719 regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD); 1660 regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
1720 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc); 1661 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
1721 regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD); 1662 regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
1722 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4); 1663 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
1723 regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD); 1664 regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
1724 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc); 1665 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
1725 if (!(regeac & BIT(31)) && 1666 if (!(regeac & BIT(31)) &&
1726 (((regeb4 & 0x03FF0000) >> 16) != 0x142)) 1667 (((regeb4 & 0x03FF0000) >> 16) != 0x142))
@@ -1738,9 +1679,9 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
1738 } 1679 }
1739 1680
1740 /* path B PA off */ 1681 /* path B PA off */
1741 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 1682 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
1742 rtlphy->iqk_bb_backup[0]); 1683 rtlphy->iqk_bb_backup[0]);
1743 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD, 1684 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD,
1744 rtlphy->iqk_bb_backup[2]); 1685 rtlphy->iqk_bb_backup[2]);
1745 return result; 1686 return result;
1746} 1687}
@@ -1754,7 +1695,7 @@ static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
1754 1695
1755 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n"); 1696 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
1756 for (i = 0; i < regnum; i++) 1697 for (i = 0; i < regnum; i++)
1757 adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], BMASKDWORD); 1698 adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
1758} 1699}
1759 1700
1760static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw, 1701static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
@@ -1779,7 +1720,7 @@ static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
1779 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1720 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1780 "Reload ADDA power saving parameters !\n"); 1721 "Reload ADDA power saving parameters !\n");
1781 for (i = 0; i < regnum; i++) 1722 for (i = 0; i < regnum; i++)
1782 rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, adda_backup[i]); 1723 rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]);
1783} 1724}
1784 1725
1785static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw, 1726static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
@@ -1807,7 +1748,7 @@ static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
1807 pathon = rtlpriv->rtlhal.interfaceindex == 0 ? 1748 pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
1808 0x04db25a4 : 0x0b1b25a4; 1749 0x04db25a4 : 0x0b1b25a4;
1809 for (i = 0; i < IQK_ADDA_REG_NUM; i++) 1750 for (i = 0; i < IQK_ADDA_REG_NUM; i++)
1810 rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, pathon); 1751 rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
1811} 1752}
1812 1753
1813static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw, 1754static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
@@ -1830,9 +1771,9 @@ static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
1830 struct rtl_priv *rtlpriv = rtl_priv(hw); 1771 struct rtl_priv *rtlpriv = rtl_priv(hw);
1831 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n"); 1772 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n");
1832 1773
1833 rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x0); 1774 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
1834 rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 0x00010000); 1775 rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000);
1835 rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000); 1776 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1836} 1777}
1837 1778
1838static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode) 1779static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
@@ -1843,8 +1784,8 @@ static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
1843 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1784 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1844 "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI"); 1785 "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
1845 mode = pi_mode ? 0x01000100 : 0x01000000; 1786 mode = pi_mode ? 0x01000100 : 0x01000000;
1846 rtl_set_bbreg(hw, 0x820, BMASKDWORD, mode); 1787 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1847 rtl_set_bbreg(hw, 0x828, BMASKDWORD, mode); 1788 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1848} 1789}
1849 1790
1850static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], 1791static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
@@ -1875,7 +1816,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
1875 1816
1876 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n"); 1817 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n");
1877 if (t == 0) { 1818 if (t == 0) {
1878 bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD); 1819 bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
1879 RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue); 1820 RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
1880 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n", 1821 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
1881 is2t ? "2T2R" : "1T1R"); 1822 is2t ? "2T2R" : "1T1R");
@@ -1898,40 +1839,40 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
1898 _rtl92d_phy_pimode_switch(hw, true); 1839 _rtl92d_phy_pimode_switch(hw, true);
1899 1840
1900 rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00); 1841 rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
1901 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600); 1842 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
1902 rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4); 1843 rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
1903 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22204000); 1844 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000);
1904 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f); 1845 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
1905 if (is2t) { 1846 if (is2t) {
1906 rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 1847 rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD,
1907 0x00010000); 1848 0x00010000);
1908 rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, BMASKDWORD, 1849 rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD,
1909 0x00010000); 1850 0x00010000);
1910 } 1851 }
1911 /* MAC settings */ 1852 /* MAC settings */
1912 _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, 1853 _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
1913 rtlphy->iqk_mac_backup); 1854 rtlphy->iqk_mac_backup);
1914 /* Page B init */ 1855 /* Page B init */
1915 rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000); 1856 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
1916 if (is2t) 1857 if (is2t)
1917 rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000); 1858 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
1918 /* IQ calibration setting */ 1859 /* IQ calibration setting */
1919 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n"); 1860 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
1920 rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000); 1861 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1921 rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x01007c00); 1862 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1922 rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800); 1863 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1923 for (i = 0; i < retrycount; i++) { 1864 for (i = 0; i < retrycount; i++) {
1924 patha_ok = _rtl92d_phy_patha_iqk(hw, is2t); 1865 patha_ok = _rtl92d_phy_patha_iqk(hw, is2t);
1925 if (patha_ok == 0x03) { 1866 if (patha_ok == 0x03) {
1926 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1867 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1927 "Path A IQK Success!!\n"); 1868 "Path A IQK Success!!\n");
1928 result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) & 1869 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1929 0x3FF0000) >> 16; 1870 0x3FF0000) >> 16;
1930 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) & 1871 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1931 0x3FF0000) >> 16; 1872 0x3FF0000) >> 16;
1932 result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) & 1873 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1933 0x3FF0000) >> 16; 1874 0x3FF0000) >> 16;
1934 result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) & 1875 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1935 0x3FF0000) >> 16; 1876 0x3FF0000) >> 16;
1936 break; 1877 break;
1937 } else if (i == (retrycount - 1) && patha_ok == 0x01) { 1878 } else if (i == (retrycount - 1) && patha_ok == 0x01) {
@@ -1939,9 +1880,9 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
1939 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1880 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1940 "Path A IQK Only Tx Success!!\n"); 1881 "Path A IQK Only Tx Success!!\n");
1941 1882
1942 result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) & 1883 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1943 0x3FF0000) >> 16; 1884 0x3FF0000) >> 16;
1944 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) & 1885 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1945 0x3FF0000) >> 16; 1886 0x3FF0000) >> 16;
1946 } 1887 }
1947 } 1888 }
@@ -1957,22 +1898,22 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
1957 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1898 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1958 "Path B IQK Success!!\n"); 1899 "Path B IQK Success!!\n");
1959 result[t][4] = (rtl_get_bbreg(hw, 0xeb4, 1900 result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
1960 BMASKDWORD) & 0x3FF0000) >> 16; 1901 MASKDWORD) & 0x3FF0000) >> 16;
1961 result[t][5] = (rtl_get_bbreg(hw, 0xebc, 1902 result[t][5] = (rtl_get_bbreg(hw, 0xebc,
1962 BMASKDWORD) & 0x3FF0000) >> 16; 1903 MASKDWORD) & 0x3FF0000) >> 16;
1963 result[t][6] = (rtl_get_bbreg(hw, 0xec4, 1904 result[t][6] = (rtl_get_bbreg(hw, 0xec4,
1964 BMASKDWORD) & 0x3FF0000) >> 16; 1905 MASKDWORD) & 0x3FF0000) >> 16;
1965 result[t][7] = (rtl_get_bbreg(hw, 0xecc, 1906 result[t][7] = (rtl_get_bbreg(hw, 0xecc,
1966 BMASKDWORD) & 0x3FF0000) >> 16; 1907 MASKDWORD) & 0x3FF0000) >> 16;
1967 break; 1908 break;
1968 } else if (i == (retrycount - 1) && pathb_ok == 0x01) { 1909 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1969 /* Tx IQK OK */ 1910 /* Tx IQK OK */
1970 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1911 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1971 "Path B Only Tx IQK Success!!\n"); 1912 "Path B Only Tx IQK Success!!\n");
1972 result[t][4] = (rtl_get_bbreg(hw, 0xeb4, 1913 result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
1973 BMASKDWORD) & 0x3FF0000) >> 16; 1914 MASKDWORD) & 0x3FF0000) >> 16;
1974 result[t][5] = (rtl_get_bbreg(hw, 0xebc, 1915 result[t][5] = (rtl_get_bbreg(hw, 0xebc,
1975 BMASKDWORD) & 0x3FF0000) >> 16; 1916 MASKDWORD) & 0x3FF0000) >> 16;
1976 } 1917 }
1977 } 1918 }
1978 if (0x00 == pathb_ok) 1919 if (0x00 == pathb_ok)
@@ -1984,7 +1925,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
1984 RTPRINT(rtlpriv, FINIT, INIT_IQK, 1925 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1985 "IQK:Back to BB mode, load original value!\n"); 1926 "IQK:Back to BB mode, load original value!\n");
1986 1927
1987 rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0); 1928 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1988 if (t != 0) { 1929 if (t != 0) {
1989 /* Switch back BB to SI mode after finish IQ Calibration. */ 1930 /* Switch back BB to SI mode after finish IQ Calibration. */
1990 if (!rtlphy->rfpi_enable) 1931 if (!rtlphy->rfpi_enable)
@@ -2004,8 +1945,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
2004 rtlphy->iqk_bb_backup, 1945 rtlphy->iqk_bb_backup,
2005 IQK_BB_REG_NUM - 1); 1946 IQK_BB_REG_NUM - 1);
2006 /* load 0xe30 IQC default value */ 1947 /* load 0xe30 IQC default value */
2007 rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x01008c00); 1948 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
2008 rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x01008c00); 1949 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
2009 } 1950 }
2010 RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n"); 1951 RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
2011} 1952}
@@ -2042,7 +1983,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
2042 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n"); 1983 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n");
2043 mdelay(IQK_DELAY_TIME * 20); 1984 mdelay(IQK_DELAY_TIME * 20);
2044 if (t == 0) { 1985 if (t == 0) {
2045 bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD); 1986 bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
2046 RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue); 1987 RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
2047 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n", 1988 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
2048 is2t ? "2T2R" : "1T1R"); 1989 is2t ? "2T2R" : "1T1R");
@@ -2072,38 +2013,38 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
2072 if (!rtlphy->rfpi_enable) 2013 if (!rtlphy->rfpi_enable)
2073 _rtl92d_phy_pimode_switch(hw, true); 2014 _rtl92d_phy_pimode_switch(hw, true);
2074 rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00); 2015 rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
2075 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600); 2016 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
2076 rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4); 2017 rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
2077 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22208000); 2018 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000);
2078 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f); 2019 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
2079 2020
2080 /* Page B init */ 2021 /* Page B init */
2081 rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000); 2022 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
2082 if (is2t) 2023 if (is2t)
2083 rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000); 2024 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
2084 /* IQ calibration setting */ 2025 /* IQ calibration setting */
2085 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n"); 2026 RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
2086 rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000); 2027 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
2087 rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x10007c00); 2028 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x10007c00);
2088 rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800); 2029 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
2089 patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t); 2030 patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t);
2090 if (patha_ok == 0x03) { 2031 if (patha_ok == 0x03) {
2091 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n"); 2032 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n");
2092 result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) & 2033 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
2093 0x3FF0000) >> 16; 2034 0x3FF0000) >> 16;
2094 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) & 2035 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
2095 0x3FF0000) >> 16; 2036 0x3FF0000) >> 16;
2096 result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) & 2037 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
2097 0x3FF0000) >> 16; 2038 0x3FF0000) >> 16;
2098 result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) & 2039 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
2099 0x3FF0000) >> 16; 2040 0x3FF0000) >> 16;
2100 } else if (patha_ok == 0x01) { /* Tx IQK OK */ 2041 } else if (patha_ok == 0x01) { /* Tx IQK OK */
2101 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2042 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2102 "Path A IQK Only Tx Success!!\n"); 2043 "Path A IQK Only Tx Success!!\n");
2103 2044
2104 result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) & 2045 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
2105 0x3FF0000) >> 16; 2046 0x3FF0000) >> 16;
2106 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) & 2047 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
2107 0x3FF0000) >> 16; 2048 0x3FF0000) >> 16;
2108 } else { 2049 } else {
2109 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n"); 2050 RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n");
@@ -2116,20 +2057,20 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
2116 if (pathb_ok == 0x03) { 2057 if (pathb_ok == 0x03) {
2117 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2058 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2118 "Path B IQK Success!!\n"); 2059 "Path B IQK Success!!\n");
2119 result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) & 2060 result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
2120 0x3FF0000) >> 16; 2061 0x3FF0000) >> 16;
2121 result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) & 2062 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
2122 0x3FF0000) >> 16; 2063 0x3FF0000) >> 16;
2123 result[t][6] = (rtl_get_bbreg(hw, 0xec4, BMASKDWORD) & 2064 result[t][6] = (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
2124 0x3FF0000) >> 16; 2065 0x3FF0000) >> 16;
2125 result[t][7] = (rtl_get_bbreg(hw, 0xecc, BMASKDWORD) & 2066 result[t][7] = (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
2126 0x3FF0000) >> 16; 2067 0x3FF0000) >> 16;
2127 } else if (pathb_ok == 0x01) { /* Tx IQK OK */ 2068 } else if (pathb_ok == 0x01) { /* Tx IQK OK */
2128 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2069 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2129 "Path B Only Tx IQK Success!!\n"); 2070 "Path B Only Tx IQK Success!!\n");
2130 result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) & 2071 result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
2131 0x3FF0000) >> 16; 2072 0x3FF0000) >> 16;
2132 result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) & 2073 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
2133 0x3FF0000) >> 16; 2074 0x3FF0000) >> 16;
2134 } else { 2075 } else {
2135 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2076 RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -2140,7 +2081,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
2140 /* Back to BB mode, load original value */ 2081 /* Back to BB mode, load original value */
2141 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2082 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2142 "IQK:Back to BB mode, load original value!\n"); 2083 "IQK:Back to BB mode, load original value!\n");
2143 rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0); 2084 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
2144 if (t != 0) { 2085 if (t != 0) {
2145 if (is2t) 2086 if (is2t)
2146 _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg, 2087 _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
@@ -2240,7 +2181,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
2240 return; 2181 return;
2241 } else if (iqk_ok) { 2182 } else if (iqk_ok) {
2242 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 2183 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
2243 BMASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */ 2184 MASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
2244 val_x = result[final_candidate][0]; 2185 val_x = result[final_candidate][0];
2245 if ((val_x & 0x00000200) != 0) 2186 if ((val_x & 0x00000200) != 0)
2246 val_x = val_x | 0xFFFFFC00; 2187 val_x = val_x | 0xFFFFFC00;
@@ -2271,7 +2212,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
2271 ((val_y * oldval_0 >> 7) & 0x1)); 2212 ((val_y * oldval_0 >> 7) & 0x1));
2272 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n", 2213 RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
2273 rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 2214 rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
2274 BMASKDWORD)); 2215 MASKDWORD));
2275 if (txonly) { 2216 if (txonly) {
2276 RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n"); 2217 RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
2277 return; 2218 return;
@@ -2299,7 +2240,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
2299 return; 2240 return;
2300 } else if (iqk_ok) { 2241 } else if (iqk_ok) {
2301 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 2242 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
2302 BMASKDWORD) >> 22) & 0x3FF; 2243 MASKDWORD) >> 22) & 0x3FF;
2303 val_x = result[final_candidate][4]; 2244 val_x = result[final_candidate][4];
2304 if ((val_x & 0x00000200) != 0) 2245 if ((val_x & 0x00000200) != 0)
2305 val_x = val_x | 0xFFFFFC00; 2246 val_x = val_x | 0xFFFFFC00;
@@ -2657,7 +2598,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
2657 rf_mode[index] = rtl_read_byte(rtlpriv, offset); 2598 rf_mode[index] = rtl_read_byte(rtlpriv, offset);
2658 /* 2. Set RF mode = standby mode */ 2599 /* 2. Set RF mode = standby mode */
2659 rtl_set_rfreg(hw, (enum radio_path)index, RF_AC, 2600 rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
2660 BRFREGOFFSETMASK, 0x010000); 2601 RFREG_OFFSET_MASK, 0x010000);
2661 if (rtlpci->init_ready) { 2602 if (rtlpci->init_ready) {
2662 /* switch CV-curve control by LC-calibration */ 2603 /* switch CV-curve control by LC-calibration */
2663 rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7, 2604 rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
@@ -2667,16 +2608,16 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
2667 0x08000, 0x01); 2608 0x08000, 0x01);
2668 } 2609 }
2669 u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6, 2610 u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
2670 BRFREGOFFSETMASK); 2611 RFREG_OFFSET_MASK);
2671 while ((!(u4tmp & BIT(11))) && timecount <= timeout) { 2612 while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
2672 mdelay(50); 2613 mdelay(50);
2673 timecount += 50; 2614 timecount += 50;
2674 u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, 2615 u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
2675 RF_SYN_G6, BRFREGOFFSETMASK); 2616 RF_SYN_G6, RFREG_OFFSET_MASK);
2676 } 2617 }
2677 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2618 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2678 "PHY_LCK finish delay for %d ms=2\n", timecount); 2619 "PHY_LCK finish delay for %d ms=2\n", timecount);
2679 u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, BRFREGOFFSETMASK); 2620 u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
2680 if (index == 0 && rtlhal->interfaceindex == 0) { 2621 if (index == 0 && rtlhal->interfaceindex == 0) {
2681 RTPRINT(rtlpriv, FINIT, INIT_IQK, 2622 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2682 "path-A / 5G LCK\n"); 2623 "path-A / 5G LCK\n");
@@ -2696,9 +2637,9 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
2696 0x7f, i); 2637 0x7f, i);
2697 2638
2698 rtl_set_rfreg(hw, (enum radio_path)index, 0x4D, 2639 rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
2699 BRFREGOFFSETMASK, 0x0); 2640 RFREG_OFFSET_MASK, 0x0);
2700 readval = rtl_get_rfreg(hw, (enum radio_path)index, 2641 readval = rtl_get_rfreg(hw, (enum radio_path)index,
2701 0x4F, BRFREGOFFSETMASK); 2642 0x4F, RFREG_OFFSET_MASK);
2702 curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5; 2643 curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
2703 /* reg 0x4f [4:0] */ 2644 /* reg 0x4f [4:0] */
2704 /* reg 0x50 [19:10] */ 2645 /* reg 0x50 [19:10] */
@@ -2912,7 +2853,7 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
2912 } 2853 }
2913 rtl_set_rfreg(hw, (enum radio_path)rfpath, 2854 rtl_set_rfreg(hw, (enum radio_path)rfpath,
2914 currentcmd->para1, 2855 currentcmd->para1,
2915 BRFREGOFFSETMASK, 2856 RFREG_OFFSET_MASK,
2916 rtlphy->rfreg_chnlval[rfpath]); 2857 rtlphy->rfreg_chnlval[rfpath]);
2917 _rtl92d_phy_reload_imr_setting(hw, channel, 2858 _rtl92d_phy_reload_imr_setting(hw, channel,
2918 rfpath); 2859 rfpath);
@@ -2960,7 +2901,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
2960 if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && 2901 if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
2961 rtlhal->bandset == BAND_ON_BOTH) { 2902 rtlhal->bandset == BAND_ON_BOTH) {
2962 ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER, 2903 ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
2963 BMASKDWORD); 2904 MASKDWORD);
2964 if (rtlphy->current_channel > 14 && !(ret_value & BIT(0))) 2905 if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
2965 rtl92d_phy_switch_wirelessband(hw, BAND_ON_5G); 2906 rtl92d_phy_switch_wirelessband(hw, BAND_ON_5G);
2966 else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0))) 2907 else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
@@ -3112,7 +3053,7 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
3112 /* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */ 3053 /* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
3113 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); 3054 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
3114 /* b. RF path 0 offset 0x00 = 0x00 disable RF */ 3055 /* b. RF path 0 offset 0x00 = 0x00 disable RF */
3115 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00); 3056 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
3116 /* c. APSD_CTRL 0x600[7:0] = 0x40 */ 3057 /* c. APSD_CTRL 0x600[7:0] = 0x40 */
3117 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); 3058 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
3118 /* d. APSD_CTRL 0x600[7:0] = 0x00 3059 /* d. APSD_CTRL 0x600[7:0] = 0x00
@@ -3120,12 +3061,12 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
3120 * RF path 0 offset 0x00 = 0x00 3061 * RF path 0 offset 0x00 = 0x00
3121 * APSD_CTRL 0x600[7:0] = 0x40 3062 * APSD_CTRL 0x600[7:0] = 0x40
3122 * */ 3063 * */
3123 u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK); 3064 u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
3124 while (u4btmp != 0 && delay > 0) { 3065 while (u4btmp != 0 && delay > 0) {
3125 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0); 3066 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
3126 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00); 3067 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
3127 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); 3068 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
3128 u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK); 3069 u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
3129 delay--; 3070 delay--;
3130 } 3071 }
3131 if (delay == 0) { 3072 if (delay == 0) {
@@ -3468,9 +3409,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
3468 /* 5G LAN ON */ 3409 /* 5G LAN ON */
3469 rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa); 3410 rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
3470 /* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */ 3411 /* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */
3471 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD, 3412 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
3472 0x40000100); 3413 0x40000100);
3473 rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD, 3414 rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
3474 0x40000100); 3415 0x40000100);
3475 if (rtlhal->macphymode == DUALMAC_DUALPHY) { 3416 if (rtlhal->macphymode == DUALMAC_DUALPHY) {
3476 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, 3417 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3524,16 +3465,16 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
3524 rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0); 3465 rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
3525 /* TX BB gain shift,Just for testchip,0xc80,0xc88 */ 3466 /* TX BB gain shift,Just for testchip,0xc80,0xc88 */
3526 if (rtlefuse->internal_pa_5g[0]) 3467 if (rtlefuse->internal_pa_5g[0])
3527 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD, 3468 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
3528 0x2d4000b5); 3469 0x2d4000b5);
3529 else 3470 else
3530 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD, 3471 rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
3531 0x20000080); 3472 0x20000080);
3532 if (rtlefuse->internal_pa_5g[1]) 3473 if (rtlefuse->internal_pa_5g[1])
3533 rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD, 3474 rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
3534 0x2d4000b5); 3475 0x2d4000b5);
3535 else 3476 else
3536 rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD, 3477 rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
3537 0x20000080); 3478 0x20000080);
3538 if (rtlhal->macphymode == DUALMAC_DUALPHY) { 3479 if (rtlhal->macphymode == DUALMAC_DUALPHY) {
3539 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, 3480 rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3560,8 +3501,8 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
3560 } 3501 }
3561 } 3502 }
3562 /* update IQK related settings */ 3503 /* update IQK related settings */
3563 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, BMASKDWORD, 0x40000100); 3504 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
3564 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, BMASKDWORD, 0x40000100); 3505 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
3565 rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00); 3506 rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00);
3566 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) | 3507 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
3567 BIT(26) | BIT(24), 0x00); 3508 BIT(26) | BIT(24), 0x00);
@@ -3590,7 +3531,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
3590 /* DMDP */ 3531 /* DMDP */
3591 if (rtlphy->rf_type == RF_1T1R) { 3532 if (rtlphy->rf_type == RF_1T1R) {
3592 /* Use antenna 0,0xc04,0xd04 */ 3533 /* Use antenna 0,0xc04,0xd04 */
3593 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x11); 3534 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11);
3594 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1); 3535 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
3595 3536
3596 /* enable ad/da clock1 for dual-phy reg0x888 */ 3537 /* enable ad/da clock1 for dual-phy reg0x888 */
@@ -3612,7 +3553,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
3612 } else { 3553 } else {
3613 /* Single PHY */ 3554 /* Single PHY */
3614 /* Use antenna 0 & 1,0xc04,0xd04 */ 3555 /* Use antenna 0 & 1,0xc04,0xd04 */
3615 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x33); 3556 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
3616 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3); 3557 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
3617 /* disable ad/da clock1,0x888 */ 3558 /* disable ad/da clock1,0x888 */
3618 rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0); 3559 rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
@@ -3620,9 +3561,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
3620 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; 3561 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
3621 rfpath++) { 3562 rfpath++) {
3622 rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath, 3563 rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
3623 RF_CHNLBW, BRFREGOFFSETMASK); 3564 RF_CHNLBW, RFREG_OFFSET_MASK);
3624 rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C, 3565 rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
3625 BRFREGOFFSETMASK); 3566 RFREG_OFFSET_MASK);
3626 } 3567 }
3627 for (i = 0; i < 2; i++) 3568 for (i = 0; i < 2; i++)
3628 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n", 3569 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
index b7498c5bafc5..7f29b8d765b3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
@@ -1295,18 +1295,4 @@
1295#define BWORD1 0xc 1295#define BWORD1 0xc
1296#define BDWORD 0xf 1296#define BDWORD 0xf
1297 1297
1298#define BMASKBYTE0 0xff
1299#define BMASKBYTE1 0xff00
1300#define BMASKBYTE2 0xff0000
1301#define BMASKBYTE3 0xff000000
1302#define BMASKHWORD 0xffff0000
1303#define BMASKLWORD 0x0000ffff
1304#define BMASKDWORD 0xffffffff
1305#define BMASK12BITS 0xfff
1306#define BMASKH4BITS 0xf0000000
1307#define BMASKOFDM_D 0xffc00000
1308#define BMASKCCK 0x3f3f3f3f
1309
1310#define BRFREGOFFSETMASK 0xfffff
1311
1312#endif 1298#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index 20144e0b4142..6a6ac540d5b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -125,7 +125,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
125 } 125 }
126 126
127 tmpval = tx_agc[RF90_PATH_A] & 0xff; 127 tmpval = tx_agc[RF90_PATH_A] & 0xff;
128 rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval); 128 rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
129 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, 129 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
130 "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", 130 "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
131 tmpval, RTXAGC_A_CCK1_MCS32); 131 tmpval, RTXAGC_A_CCK1_MCS32);
@@ -135,7 +135,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
135 "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", 135 "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
136 tmpval, RTXAGC_B_CCK11_A_CCK2_11); 136 tmpval, RTXAGC_B_CCK11_A_CCK2_11);
137 tmpval = tx_agc[RF90_PATH_B] >> 24; 137 tmpval = tx_agc[RF90_PATH_B] >> 24;
138 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval); 138 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
139 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, 139 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
140 "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", 140 "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
141 tmpval, RTXAGC_B_CCK11_A_CCK2_11); 141 tmpval, RTXAGC_B_CCK11_A_CCK2_11);
@@ -360,7 +360,7 @@ static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
360 regoffset = regoffset_a[index]; 360 regoffset = regoffset_a[index];
361 else 361 else
362 regoffset = regoffset_b[index]; 362 regoffset = regoffset_b[index];
363 rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval); 363 rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
364 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, 364 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
365 "Set 0x%x = %08x\n", regoffset, writeval); 365 "Set 0x%x = %08x\n", regoffset, writeval);
366 if (((get_rf_type(rtlphy) == RF_2T2R) && 366 if (((get_rf_type(rtlphy) == RF_2T2R) &&
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 0eb0f4ae5920..99c2ab5dfceb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -545,7 +545,7 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
545 545
546void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 546void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
547 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 547 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
548 struct ieee80211_tx_info *info, 548 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
549 struct ieee80211_sta *sta, 549 struct ieee80211_sta *sta,
550 struct sk_buff *skb, 550 struct sk_buff *skb,
551 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 551 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -786,7 +786,8 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
786 SET_TX_DESC_OWN(pdesc, 1); 786 SET_TX_DESC_OWN(pdesc, 1);
787} 787}
788 788
789void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) 789void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
790 u8 desc_name, u8 *val)
790{ 791{
791 if (istx) { 792 if (istx) {
792 switch (desc_name) { 793 switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index c1b5dfb79d53..fb5cf0634e8d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -728,8 +728,8 @@ struct rx_desc_92d {
728} __packed; 728} __packed;
729 729
730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
731 struct ieee80211_hdr *hdr, 731 struct ieee80211_hdr *hdr, u8 *pdesc,
732 u8 *pdesc, struct ieee80211_tx_info *info, 732 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
733 struct ieee80211_sta *sta, 733 struct ieee80211_sta *sta,
734 struct sk_buff *skb, u8 hw_queue, 734 struct sk_buff *skb, u8 hw_queue,
735 struct rtl_tcb_desc *ptcb_desc); 735 struct rtl_tcb_desc *ptcb_desc);
@@ -737,7 +737,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
737 struct rtl_stats *stats, 737 struct rtl_stats *stats,
738 struct ieee80211_rx_status *rx_status, 738 struct ieee80211_rx_status *rx_status,
739 u8 *pdesc, struct sk_buff *skb); 739 u8 *pdesc, struct sk_buff *skb);
740void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); 740void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
741 u8 desc_name, u8 *val);
741u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name); 742u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
742void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); 743void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
743void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 744void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 4f461786a7eb..9098558d916d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -251,7 +251,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
251 u8 e_aci = *val; 251 u8 e_aci = *val;
252 rtl92s_dm_init_edca_turbo(hw); 252 rtl92s_dm_init_edca_turbo(hw);
253 253
254 if (rtlpci->acm_method != eAcmWay2_SW) 254 if (rtlpci->acm_method != EACMWAY2_SW)
255 rtlpriv->cfg->ops->set_hw_reg(hw, 255 rtlpriv->cfg->ops->set_hw_reg(hw,
256 HW_VAR_ACM_CTRL, 256 HW_VAR_ACM_CTRL,
257 &e_aci); 257 &e_aci);
@@ -413,20 +413,18 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
413 (u8 *)(&fw_current_inps)); 413 (u8 *)(&fw_current_inps));
414 rtlpriv->cfg->ops->set_hw_reg(hw, 414 rtlpriv->cfg->ops->set_hw_reg(hw,
415 HW_VAR_H2C_FW_PWRMODE, 415 HW_VAR_H2C_FW_PWRMODE,
416 (u8 *)(&ppsc->fwctrl_psmode)); 416 &ppsc->fwctrl_psmode);
417 417
418 rtlpriv->cfg->ops->set_hw_reg(hw, 418 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
419 HW_VAR_SET_RPWM, 419 &rpwm_val);
420 (u8 *)(&rpwm_val));
421 } else { 420 } else {
422 rpwm_val = 0x0C; /* RF on */ 421 rpwm_val = 0x0C; /* RF on */
423 fw_pwrmode = FW_PS_ACTIVE_MODE; 422 fw_pwrmode = FW_PS_ACTIVE_MODE;
424 fw_current_inps = false; 423 fw_current_inps = false;
425 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 424 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
426 (u8 *)(&rpwm_val)); 425 &rpwm_val);
427 rtlpriv->cfg->ops->set_hw_reg(hw, 426 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
428 HW_VAR_H2C_FW_PWRMODE, 427 &fw_pwrmode);
429 (u8 *)(&fw_pwrmode));
430 428
431 rtlpriv->cfg->ops->set_hw_reg(hw, 429 rtlpriv->cfg->ops->set_hw_reg(hw,
432 HW_VAR_FW_PSMODE_STATUS, 430 HW_VAR_FW_PSMODE_STATUS,
@@ -955,7 +953,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
955 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 953 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
956 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 954 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
957 u8 tmp_byte = 0; 955 u8 tmp_byte = 0;
958 956 unsigned long flags;
959 bool rtstatus = true; 957 bool rtstatus = true;
960 u8 tmp_u1b; 958 u8 tmp_u1b;
961 int err = false; 959 int err = false;
@@ -967,6 +965,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
967 965
968 rtlpci->being_init_adapter = true; 966 rtlpci->being_init_adapter = true;
969 967
968 /* As this function can take a very long time (up to 350 ms)
969 * and can be called with irqs disabled, reenable the irqs
970 * to let the other devices continue being serviced.
971 *
972 * It is safe doing so since our own interrupts will only be enabled
973 * in a subsequent step.
974 */
975 local_save_flags(flags);
976 local_irq_enable();
977
970 rtlpriv->intf_ops->disable_aspm(hw); 978 rtlpriv->intf_ops->disable_aspm(hw);
971 979
972 /* 1. MAC Initialize */ 980 /* 1. MAC Initialize */
@@ -984,7 +992,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
984 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 992 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
985 "Failed to download FW. Init HW without FW now... " 993 "Failed to download FW. Init HW without FW now... "
986 "Please copy FW into /lib/firmware/rtlwifi\n"); 994 "Please copy FW into /lib/firmware/rtlwifi\n");
987 return 1; 995 err = 1;
996 goto exit;
988 } 997 }
989 998
990 /* After FW download, we have to reset MAC register */ 999 /* After FW download, we have to reset MAC register */
@@ -997,7 +1006,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
997 /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */ 1006 /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
998 if (!rtl92s_phy_mac_config(hw)) { 1007 if (!rtl92s_phy_mac_config(hw)) {
999 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n"); 1008 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
1000 return rtstatus; 1009 err = rtstatus;
1010 goto exit;
1001 } 1011 }
1002 1012
1003 /* because last function modify RCR, so we update 1013 /* because last function modify RCR, so we update
@@ -1016,7 +1026,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
1016 /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */ 1026 /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
1017 if (!rtl92s_phy_bb_config(hw)) { 1027 if (!rtl92s_phy_bb_config(hw)) {
1018 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n"); 1028 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
1019 return rtstatus; 1029 err = rtstatus;
1030 goto exit;
1020 } 1031 }
1021 1032
1022 /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */ 1033 /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */
@@ -1033,7 +1044,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
1033 1044
1034 if (!rtl92s_phy_rf_config(hw)) { 1045 if (!rtl92s_phy_rf_config(hw)) {
1035 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n"); 1046 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
1036 return rtstatus; 1047 err = rtstatus;
1048 goto exit;
1037 } 1049 }
1038 1050
1039 /* After read predefined TXT, we must set BB/MAC/RF 1051 /* After read predefined TXT, we must set BB/MAC/RF
@@ -1122,8 +1134,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
1122 1134
1123 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON); 1135 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
1124 rtl92s_dm_init(hw); 1136 rtl92s_dm_init(hw);
1137exit:
1138 local_irq_restore(flags);
1125 rtlpci->being_init_adapter = false; 1139 rtlpci->being_init_adapter = false;
1126
1127 return err; 1140 return err;
1128} 1141}
1129 1142
@@ -1135,12 +1148,13 @@ void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr)
1135void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1148void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1136{ 1149{
1137 struct rtl_priv *rtlpriv = rtl_priv(hw); 1150 struct rtl_priv *rtlpriv = rtl_priv(hw);
1138 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1151 u32 reg_rcr;
1139 u32 reg_rcr = rtlpci->receive_config;
1140 1152
1141 if (rtlpriv->psc.rfpwr_state != ERFON) 1153 if (rtlpriv->psc.rfpwr_state != ERFON)
1142 return; 1154 return;
1143 1155
1156 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1157
1144 if (check_bssid) { 1158 if (check_bssid) {
1145 reg_rcr |= (RCR_CBSSID); 1159 reg_rcr |= (RCR_CBSSID);
1146 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr)); 1160 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 9c092e6eb3fe..77c5b5f35244 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../ps.h" 32#include "../ps.h"
33#include "../core.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -833,18 +834,7 @@ static bool _rtl92s_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
833 834
834 if (configtype == BASEBAND_CONFIG_PHY_REG) { 835 if (configtype == BASEBAND_CONFIG_PHY_REG) {
835 for (i = 0; i < phy_reg_len; i = i + 2) { 836 for (i = 0; i < phy_reg_len; i = i + 2) {
836 if (phy_reg_table[i] == 0xfe) 837 rtl_addr_delay(phy_reg_table[i]);
837 mdelay(50);
838 else if (phy_reg_table[i] == 0xfd)
839 mdelay(5);
840 else if (phy_reg_table[i] == 0xfc)
841 mdelay(1);
842 else if (phy_reg_table[i] == 0xfb)
843 udelay(50);
844 else if (phy_reg_table[i] == 0xfa)
845 udelay(5);
846 else if (phy_reg_table[i] == 0xf9)
847 udelay(1);
848 838
849 /* Add delay for ECS T20 & LG malow platform, */ 839 /* Add delay for ECS T20 & LG malow platform, */
850 udelay(1); 840 udelay(1);
@@ -886,18 +876,7 @@ static bool _rtl92s_phy_set_bb_to_diff_rf(struct ieee80211_hw *hw,
886 876
887 if (configtype == BASEBAND_CONFIG_PHY_REG) { 877 if (configtype == BASEBAND_CONFIG_PHY_REG) {
888 for (i = 0; i < phy_regarray2xtxr_len; i = i + 3) { 878 for (i = 0; i < phy_regarray2xtxr_len; i = i + 3) {
889 if (phy_regarray2xtxr_table[i] == 0xfe) 879 rtl_addr_delay(phy_regarray2xtxr_table[i]);
890 mdelay(50);
891 else if (phy_regarray2xtxr_table[i] == 0xfd)
892 mdelay(5);
893 else if (phy_regarray2xtxr_table[i] == 0xfc)
894 mdelay(1);
895 else if (phy_regarray2xtxr_table[i] == 0xfb)
896 udelay(50);
897 else if (phy_regarray2xtxr_table[i] == 0xfa)
898 udelay(5);
899 else if (phy_regarray2xtxr_table[i] == 0xf9)
900 udelay(1);
901 880
902 rtl92s_phy_set_bb_reg(hw, phy_regarray2xtxr_table[i], 881 rtl92s_phy_set_bb_reg(hw, phy_regarray2xtxr_table[i],
903 phy_regarray2xtxr_table[i + 1], 882 phy_regarray2xtxr_table[i + 1],
@@ -920,18 +899,7 @@ static bool _rtl92s_phy_config_bb_with_pg(struct ieee80211_hw *hw,
920 899
921 if (configtype == BASEBAND_CONFIG_PHY_REG) { 900 if (configtype == BASEBAND_CONFIG_PHY_REG) {
922 for (i = 0; i < phy_pg_len; i = i + 3) { 901 for (i = 0; i < phy_pg_len; i = i + 3) {
923 if (phy_table_pg[i] == 0xfe) 902 rtl_addr_delay(phy_table_pg[i]);
924 mdelay(50);
925 else if (phy_table_pg[i] == 0xfd)
926 mdelay(5);
927 else if (phy_table_pg[i] == 0xfc)
928 mdelay(1);
929 else if (phy_table_pg[i] == 0xfb)
930 udelay(50);
931 else if (phy_table_pg[i] == 0xfa)
932 udelay(5);
933 else if (phy_table_pg[i] == 0xf9)
934 udelay(1);
935 903
936 _rtl92s_store_pwrindex_diffrate_offset(hw, 904 _rtl92s_store_pwrindex_diffrate_offset(hw,
937 phy_table_pg[i], 905 phy_table_pg[i],
@@ -1034,28 +1002,9 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
1034 switch (rfpath) { 1002 switch (rfpath) {
1035 case RF90_PATH_A: 1003 case RF90_PATH_A:
1036 for (i = 0; i < radio_a_tblen; i = i + 2) { 1004 for (i = 0; i < radio_a_tblen; i = i + 2) {
1037 if (radio_a_table[i] == 0xfe) 1005 rtl_rfreg_delay(hw, rfpath, radio_a_table[i],
1038 /* Delay specific ms. Only RF configuration 1006 MASK20BITS, radio_a_table[i + 1]);
1039 * requires delay. */
1040 mdelay(50);
1041 else if (radio_a_table[i] == 0xfd)
1042 mdelay(5);
1043 else if (radio_a_table[i] == 0xfc)
1044 mdelay(1);
1045 else if (radio_a_table[i] == 0xfb)
1046 udelay(50);
1047 else if (radio_a_table[i] == 0xfa)
1048 udelay(5);
1049 else if (radio_a_table[i] == 0xf9)
1050 udelay(1);
1051 else
1052 rtl92s_phy_set_rf_reg(hw, rfpath,
1053 radio_a_table[i],
1054 MASK20BITS,
1055 radio_a_table[i + 1]);
1056 1007
1057 /* Add delay for ECS T20 & LG malow platform */
1058 udelay(1);
1059 } 1008 }
1060 1009
1061 /* PA Bias current for inferiority IC */ 1010 /* PA Bias current for inferiority IC */
@@ -1063,28 +1012,8 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
1063 break; 1012 break;
1064 case RF90_PATH_B: 1013 case RF90_PATH_B:
1065 for (i = 0; i < radio_b_tblen; i = i + 2) { 1014 for (i = 0; i < radio_b_tblen; i = i + 2) {
1066 if (radio_b_table[i] == 0xfe) 1015 rtl_rfreg_delay(hw, rfpath, radio_b_table[i],
1067 /* Delay specific ms. Only RF configuration 1016 MASK20BITS, radio_b_table[i + 1]);
1068 * requires delay.*/
1069 mdelay(50);
1070 else if (radio_b_table[i] == 0xfd)
1071 mdelay(5);
1072 else if (radio_b_table[i] == 0xfc)
1073 mdelay(1);
1074 else if (radio_b_table[i] == 0xfb)
1075 udelay(50);
1076 else if (radio_b_table[i] == 0xfa)
1077 udelay(5);
1078 else if (radio_b_table[i] == 0xf9)
1079 udelay(1);
1080 else
1081 rtl92s_phy_set_rf_reg(hw, rfpath,
1082 radio_b_table[i],
1083 MASK20BITS,
1084 radio_b_table[i + 1]);
1085
1086 /* Add delay for ECS T20 & LG malow platform */
1087 udelay(1);
1088 } 1017 }
1089 break; 1018 break;
1090 case RF90_PATH_C: 1019 case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index c81c83591940..e13043479b71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -1165,16 +1165,4 @@
1165 1165
1166#define BTX_AGCRATECCK 0x7f00 1166#define BTX_AGCRATECCK 0x7f00
1167 1167
1168#define MASKBYTE0 0xff
1169#define MASKBYTE1 0xff00
1170#define MASKBYTE2 0xff0000
1171#define MASKBYTE3 0xff000000
1172#define MASKHWORD 0xffff0000
1173#define MASKLWORD 0x0000ffff
1174#define MASKDWORD 0xffffffff
1175
1176#define MAKS12BITS 0xfffff
1177#define MASK20BITS 0xfffff
1178#define RFREG_OFFSET_MASK 0xfffff
1179
1180#endif 1168#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 92d38ab3c60e..78a81c1e390b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -52,7 +52,7 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
52 /* We only care about the path A for legacy. */ 52 /* We only care about the path A for legacy. */
53 if (rtlefuse->eeprom_version < 2) { 53 if (rtlefuse->eeprom_version < 2) {
54 pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf); 54 pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
55 } else if (rtlefuse->eeprom_version >= 2) { 55 } else {
56 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff 56 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
57 [RF90_PATH_A][chnl - 1]; 57 [RF90_PATH_A][chnl - 1];
58 58
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 27efbcdac6a9..36b48be8329c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
310 /* during testing, hdr was NULL here */ 310 /* during testing, hdr was NULL here */
311 return false; 311 return false;
312 } 312 }
313 if ((ieee80211_is_robust_mgmt_frame(hdr)) && 313 if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
314 (ieee80211_has_protected(hdr->frame_control))) 314 (ieee80211_has_protected(hdr->frame_control)))
315 rx_status->flag &= ~RX_FLAG_DECRYPTED; 315 rx_status->flag &= ~RX_FLAG_DECRYPTED;
316 else 316 else
@@ -336,7 +336,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
336 336
337void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, 337void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
338 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 338 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
339 struct ieee80211_tx_info *info, 339 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
340 struct ieee80211_sta *sta, 340 struct ieee80211_sta *sta,
341 struct sk_buff *skb, 341 struct sk_buff *skb,
342 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 342 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -573,7 +573,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
573 } 573 }
574} 574}
575 575
576void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) 576void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
577 u8 desc_name, u8 *val)
577{ 578{
578 if (istx) { 579 if (istx) {
579 switch (desc_name) { 580 switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 64dd66f287c1..5a13f17e3b41 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -29,8 +29,9 @@
29#ifndef __REALTEK_PCI92SE_TRX_H__ 29#ifndef __REALTEK_PCI92SE_TRX_H__
30#define __REALTEK_PCI92SE_TRX_H__ 30#define __REALTEK_PCI92SE_TRX_H__
31 31
32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, 32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
33 u8 *pdesc, struct ieee80211_tx_info *info, 33 struct ieee80211_hdr *hdr, u8 *pdesc,
34 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
34 struct ieee80211_sta *sta, 35 struct ieee80211_sta *sta,
35 struct sk_buff *skb, u8 hw_queue, 36 struct sk_buff *skb, u8 hw_queue,
36 struct rtl_tcb_desc *ptcb_desc); 37 struct rtl_tcb_desc *ptcb_desc);
@@ -39,7 +40,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
39bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, 40bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
40 struct ieee80211_rx_status *rx_status, u8 *pdesc, 41 struct ieee80211_rx_status *rx_status, u8 *pdesc,
41 struct sk_buff *skb); 42 struct sk_buff *skb);
42void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); 43void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
44 u8 desc_name, u8 *val);
43u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name); 45u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name);
44void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); 46void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
45 47
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
index 4ed731f09b1f..9c34a85fdb89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -10,7 +10,6 @@ rtl8723ae-objs := \
10 led.o \ 10 led.o \
11 phy.o \ 11 phy.o \
12 pwrseq.o \ 12 pwrseq.o \
13 pwrseqcmd.o \
14 rf.o \ 13 rf.o \
15 sw.o \ 14 sw.o \
16 table.o \ 15 table.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
index 8c110356dff9..debe261a7eeb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
@@ -46,11 +46,6 @@
46#define E_CUT_VERSION BIT(14) 46#define E_CUT_VERSION BIT(14)
47#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28)) 47#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
48 48
49enum version_8723e {
50 VERSION_TEST_UMC_CHIP_8723 = 0x0081,
51 VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
52 VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
53};
54 49
55/* MASK */ 50/* MASK */
56#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) 51#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
index a36eee28f9e7..25cc83058b01 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -35,6 +35,7 @@
35#include "def.h" 35#include "def.h"
36#include "phy.h" 36#include "phy.h"
37#include "dm.h" 37#include "dm.h"
38#include "../rtl8723com/dm_common.h"
38#include "fw.h" 39#include "fw.h"
39#include "hal_btc.h" 40#include "hal_btc.h"
40 41
@@ -483,16 +484,6 @@ static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
483 rtl8723ae_dm_ctrl_initgain_by_twoport(hw); 484 rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
484} 485}
485 486
486static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
487{
488 struct rtl_priv *rtlpriv = rtl_priv(hw);
489
490 rtlpriv->dm.dynamic_txpower_enable = false;
491
492 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
493 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
494}
495
496static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw) 487static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
497{ 488{
498 struct rtl_priv *rtlpriv = rtl_priv(hw); 489 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -585,19 +576,6 @@ void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
585 } 576 }
586} 577}
587 578
588static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
589{
590}
591
592void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
593{
594 struct rtl_priv *rtlpriv = rtl_priv(hw);
595
596 rtlpriv->dm.current_turbo_edca = false;
597 rtlpriv->dm.is_any_nonbepkts = false;
598 rtlpriv->dm.is_cur_rdlstate = false;
599}
600
601static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw) 579static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
602{ 580{
603 struct rtl_priv *rtlpriv = rtl_priv(hw); 581 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -669,9 +647,8 @@ static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
669 } else { 647 } else {
670 if (rtlpriv->dm.current_turbo_edca) { 648 if (rtlpriv->dm.current_turbo_edca) {
671 u8 tmp = AC0_BE; 649 u8 tmp = AC0_BE;
672 rtlpriv->cfg->ops->set_hw_reg(hw, 650 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
673 HW_VAR_AC_PARAM, 651 &tmp);
674 (u8 *) (&tmp));
675 rtlpriv->dm.current_turbo_edca = false; 652 rtlpriv->dm.current_turbo_edca = false;
676 } 653 }
677 } 654 }
@@ -778,17 +755,6 @@ static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
778 } 755 }
779} 756}
780 757
781static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
782{
783 struct rtl_priv *rtlpriv = rtl_priv(hw);
784
785 rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
786 rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
787 rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
788 rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
789 rtlpriv->dm_pstable.rssi_val_min = 0;
790}
791
792void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal) 758void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
793{ 759{
794 struct rtl_priv *rtlpriv = rtl_priv(hw); 760 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -905,11 +871,11 @@ void rtl8723ae_dm_init(struct ieee80211_hw *hw)
905 871
906 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 872 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
907 rtl8723ae_dm_diginit(hw); 873 rtl8723ae_dm_diginit(hw);
908 rtl8723ae_dm_init_dynamic_txpower(hw); 874 rtl8723_dm_init_dynamic_txpower(hw);
909 rtl8723ae_dm_init_edca_turbo(hw); 875 rtl8723_dm_init_edca_turbo(hw);
910 rtl8723ae_dm_init_rate_adaptive_mask(hw); 876 rtl8723ae_dm_init_rate_adaptive_mask(hw);
911 rtl8723ae_dm_initialize_txpower_tracking(hw); 877 rtl8723ae_dm_initialize_txpower_tracking(hw);
912 rtl8723ae_dm_init_dynamic_bpowersaving(hw); 878 rtl8723_dm_init_dynamic_bb_powersaving(hw);
913} 879}
914 880
915void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw) 881void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
@@ -930,7 +896,6 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
930 if ((ppsc->rfpwr_state == ERFON) && 896 if ((ppsc->rfpwr_state == ERFON) &&
931 ((!fw_current_inpsmode) && fw_ps_awake) && 897 ((!fw_current_inpsmode) && fw_ps_awake) &&
932 (!ppsc->rfchange_inprogress)) { 898 (!ppsc->rfchange_inprogress)) {
933 rtl8723ae_dm_pwdmonitor(hw);
934 rtl8723ae_dm_dig(hw); 899 rtl8723ae_dm_dig(hw);
935 rtl8723ae_dm_false_alarm_counter_statistics(hw); 900 rtl8723ae_dm_false_alarm_counter_statistics(hw);
936 rtl8723ae_dm_dynamic_bpowersaving(hw); 901 rtl8723ae_dm_dynamic_bpowersaving(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
index a372b0204456..d253bb53d03e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -147,7 +147,6 @@ enum dm_dig_connect_e {
147void rtl8723ae_dm_init(struct ieee80211_hw *hw); 147void rtl8723ae_dm_init(struct ieee80211_hw *hw);
148void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw); 148void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw);
149void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw); 149void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw);
150void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
151void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); 150void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
152void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); 151void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
153void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw); 152void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
index ba1502b172a6..728b7563ad36 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -34,199 +34,7 @@
34#include "reg.h" 34#include "reg.h"
35#include "def.h" 35#include "def.h"
36#include "fw.h" 36#include "fw.h"
37 37#include "../rtl8723com/fw_common.h"
38static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
39{
40 struct rtl_priv *rtlpriv = rtl_priv(hw);
41 u8 tmp;
42 if (enable) {
43 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
44 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
45
46 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
47 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
48
49 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
50 rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
51 } else {
52 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
53 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
54
55 rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
56 }
57}
58
59static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw,
60 const u8 *buffer, u32 size)
61{
62 struct rtl_priv *rtlpriv = rtl_priv(hw);
63 u32 blockSize = sizeof(u32);
64 u8 *bufferPtr = (u8 *) buffer;
65 u32 *pu4BytePtr = (u32 *) buffer;
66 u32 i, offset, blockCount, remainSize;
67
68 blockCount = size / blockSize;
69 remainSize = size % blockSize;
70
71 for (i = 0; i < blockCount; i++) {
72 offset = i * blockSize;
73 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
74 *(pu4BytePtr + i));
75 }
76
77 if (remainSize) {
78 offset = blockCount * blockSize;
79 bufferPtr += offset;
80 for (i = 0; i < remainSize; i++) {
81 rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
82 offset + i), *(bufferPtr + i));
83 }
84 }
85}
86
87static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw,
88 u32 page, const u8 *buffer, u32 size)
89{
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 u8 value8;
92 u8 u8page = (u8) (page & 0x07);
93
94 value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
95
96 rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
97 _rtl8723ae_fw_block_write(hw, buffer, size);
98}
99
100static void _rtl8723ae_write_fw(struct ieee80211_hw *hw,
101 enum version_8723e version, u8 *buffer,
102 u32 size)
103{
104 struct rtl_priv *rtlpriv = rtl_priv(hw);
105 u8 *bufferPtr = (u8 *) buffer;
106 u32 page_nums, remain_size;
107 u32 page, offset;
108
109 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
110
111 page_nums = size / FW_8192C_PAGE_SIZE;
112 remain_size = size % FW_8192C_PAGE_SIZE;
113
114 if (page_nums > 6) {
115 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
116 "Page numbers should not be greater then 6\n");
117 }
118
119 for (page = 0; page < page_nums; page++) {
120 offset = page * FW_8192C_PAGE_SIZE;
121 _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
122 FW_8192C_PAGE_SIZE);
123 }
124
125 if (remain_size) {
126 offset = page_nums * FW_8192C_PAGE_SIZE;
127 page = page_nums;
128 _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
129 remain_size);
130 }
131
132 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
133}
134
135static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw)
136{
137 struct rtl_priv *rtlpriv = rtl_priv(hw);
138 int err = -EIO;
139 u32 counter = 0;
140 u32 value32;
141
142 do {
143 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
144 } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
145 (!(value32 & FWDL_ChkSum_rpt)));
146
147 if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
148 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
149 "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
150 value32);
151 goto exit;
152 }
153
154 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
155 "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
156
157 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
158 value32 |= MCUFWDL_RDY;
159 value32 &= ~WINTINI_RDY;
160 rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
161
162 counter = 0;
163
164 do {
165 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
166 if (value32 & WINTINI_RDY) {
167 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
168 "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
169 value32);
170 err = 0;
171 goto exit;
172 }
173
174 mdelay(FW_8192C_POLLING_DELAY);
175
176 } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
177
178 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
179 "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
180
181exit:
182 return err;
183}
184
185int rtl8723ae_download_fw(struct ieee80211_hw *hw)
186{
187 struct rtl_priv *rtlpriv = rtl_priv(hw);
188 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
189 struct rtl8723ae_firmware_header *pfwheader;
190 u8 *pfwdata;
191 u32 fwsize;
192 int err;
193 enum version_8723e version = rtlhal->version;
194
195 if (!rtlhal->pfirmware)
196 return 1;
197
198 pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware;
199 pfwdata = (u8 *) rtlhal->pfirmware;
200 fwsize = rtlhal->fwsize;
201
202 if (IS_FW_HEADER_EXIST(pfwheader)) {
203 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
204 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
205 pfwheader->version, pfwheader->signature,
206 (int)sizeof(struct rtl8723ae_firmware_header));
207
208 pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header);
209 fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header);
210 }
211
212 if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
213 rtl8723ae_firmware_selfreset(hw);
214 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
215 }
216 _rtl8723ae_enable_fw_download(hw, true);
217 _rtl8723ae_write_fw(hw, version, pfwdata, fwsize);
218 _rtl8723ae_enable_fw_download(hw, false);
219
220 err = _rtl8723ae_fw_free_to_go(hw);
221 if (err) {
222 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
223 "Firmware is not ready to run!\n");
224 } else {
225 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
226 "Firmware is ready to run!\n");
227 }
228 return 0;
229}
230 38
231static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) 39static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
232{ 40{
@@ -463,50 +271,6 @@ void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw,
463 return; 271 return;
464} 272}
465 273
466void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
467{
468 u8 u1tmp;
469 u8 delay = 100;
470 struct rtl_priv *rtlpriv = rtl_priv(hw);
471
472 rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
473 u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
474
475 while (u1tmp & BIT(2)) {
476 delay--;
477 if (delay == 0)
478 break;
479 udelay(50);
480 u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
481 }
482 if (delay == 0) {
483 u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
484 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
485 }
486}
487
488void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
489{
490 struct rtl_priv *rtlpriv = rtl_priv(hw);
491 u8 u1_h2c_set_pwrmode[3] = { 0 };
492 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
493
494 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
495
496 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
497 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
498 (rtlpriv->mac80211.p2p) ?
499 ppsc->smart_ps : 1);
500 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
501 ppsc->reg_max_lps_awakeintvl);
502
503 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
504 "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
505 u1_h2c_set_pwrmode, 3);
506 rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
507
508}
509
510static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw, 274static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
511 struct sk_buff *skb) 275 struct sk_buff *skb)
512{ 276{
@@ -812,7 +576,6 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
812 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); 576 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
813 577
814 p2p_ps_offload->offload_en = 1; 578 p2p_ps_offload->offload_en = 1;
815
816 if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { 579 if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
817 p2p_ps_offload->role = 1; 580 p2p_ps_offload->role = 1;
818 p2p_ps_offload->allstasleep = 0; 581 p2p_ps_offload->allstasleep = 0;
@@ -836,3 +599,24 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
836 } 599 }
837 rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); 600 rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
838} 601}
602
603void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
604{
605 struct rtl_priv *rtlpriv = rtl_priv(hw);
606 u8 u1_h2c_set_pwrmode[3] = { 0 };
607 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
608
609 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
610
611 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
612 SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(u1_h2c_set_pwrmode,
613 (rtlpriv->mac80211.p2p) ?
614 ppsc->smart_ps : 1);
615 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
616 ppsc->reg_max_lps_awakeintvl);
617
618 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
619 "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
620 u1_h2c_set_pwrmode, 3);
621 rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
622}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
index ed3b795e6980..d355b85dd9fe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
@@ -34,7 +34,7 @@
34#define FW_8192C_END_ADDRESS 0x3FFF 34#define FW_8192C_END_ADDRESS 0x3FFF
35#define FW_8192C_PAGE_SIZE 4096 35#define FW_8192C_PAGE_SIZE 4096
36#define FW_8192C_POLLING_DELAY 5 36#define FW_8192C_POLLING_DELAY 5
37#define FW_8192C_POLLING_TIMEOUT_COUNT 1000 37#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
38 38
39#define BEACON_PG 0 39#define BEACON_PG 0
40#define PSPOLL_PG 2 40#define PSPOLL_PG 2
@@ -65,21 +65,9 @@ struct rtl8723ae_firmware_header {
65 u32 rsvd5; 65 u32 rsvd5;
66}; 66};
67 67
68enum rtl8192c_h2c_cmd {
69 H2C_AP_OFFLOAD = 0,
70 H2C_SETPWRMODE = 1,
71 H2C_JOINBSSRPT = 2,
72 H2C_RSVDPAGE = 3,
73 H2C_RSSI_REPORT = 4,
74 H2C_P2P_PS_CTW_CMD = 5,
75 H2C_P2P_PS_OFFLOAD = 6,
76 H2C_RA_MASK = 7,
77 MAX_H2CCMD
78};
79
80#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ 68#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
81 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) 69 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
82#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \ 70#define SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(__ph2ccmd, __val) \
83 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) 71 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
84#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \ 72#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
85 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) 73 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
@@ -92,10 +80,8 @@ enum rtl8192c_h2c_cmd {
92#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ 80#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
93 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) 81 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
94 82
95int rtl8723ae_download_fw(struct ieee80211_hw *hw);
96void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, 83void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
97 u32 cmd_len, u8 *p_cmdbuffer); 84 u32 cmd_len, u8 *p_cmdbuffer);
98void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
99void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 85void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
100void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 86void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
101void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 87void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 3d092e4b0b7f..48fee1be78c2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -31,6 +31,8 @@
31#include "../pci.h" 31#include "../pci.h"
32#include "dm.h" 32#include "dm.h"
33#include "fw.h" 33#include "fw.h"
34#include "../rtl8723com/fw_common.h"
35#include "../rtl8723com/fw_common.h"
34#include "phy.h" 36#include "phy.h"
35#include "reg.h" 37#include "reg.h"
36#include "hal_btc.h" 38#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
index 68c28340f791..5d534df8d90c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -30,7 +30,9 @@
30#include "hal_btc.h" 30#include "hal_btc.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "phy.h" 32#include "phy.h"
33#include "../rtl8723com/phy_common.h"
33#include "fw.h" 34#include "fw.h"
35#include "../rtl8723com/fw_common.h"
34#include "reg.h" 36#include "reg.h"
35#include "def.h" 37#include "def.h"
36 38
@@ -391,13 +393,13 @@ static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw,
391 if (sw_dac_swing_on) { 393 if (sw_dac_swing_on) {
392 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, 394 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
393 "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl); 395 "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
394 rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 396 rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000,
395 sw_dac_swing_lvl); 397 sw_dac_swing_lvl);
396 rtlpcipriv->bt_coexist.sw_coexist_all_off = false; 398 rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
397 } else { 399 } else {
398 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, 400 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
399 "[BTCoex], SwDacSwing Off!\n"); 401 "[BTCoex], SwDacSwing Off!\n");
400 rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0); 402 rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
401 } 403 }
402} 404}
403 405
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index c333dfd116b8..65c9e80e1f78 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -38,10 +38,11 @@
38#include "def.h" 38#include "def.h"
39#include "phy.h" 39#include "phy.h"
40#include "dm.h" 40#include "dm.h"
41#include "../rtl8723com/dm_common.h"
41#include "fw.h" 42#include "fw.h"
43#include "../rtl8723com/fw_common.h"
42#include "led.h" 44#include "led.h"
43#include "hw.h" 45#include "hw.h"
44#include "pwrseqcmd.h"
45#include "pwrseq.h" 46#include "pwrseq.h"
46#include "btc.h" 47#include "btc.h"
47 48
@@ -206,14 +207,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
206 rtl_write_byte(rtlpriv, REG_SLOT, val[0]); 207 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
207 208
208 for (e_aci = 0; e_aci < AC_MAX; e_aci++) { 209 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
209 rtlpriv->cfg->ops->set_hw_reg(hw, 210 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
210 HW_VAR_AC_PARAM, 211 &e_aci);
211 (u8 *) (&e_aci));
212 } 212 }
213 break; } 213 break; }
214 case HW_VAR_ACK_PREAMBLE:{ 214 case HW_VAR_ACK_PREAMBLE:{
215 u8 reg_tmp; 215 u8 reg_tmp;
216 u8 short_preamble = (bool) (*(u8 *) val); 216 u8 short_preamble = (bool)*val;
217 reg_tmp = (mac->cur_40_prime_sc) << 5; 217 reg_tmp = (mac->cur_40_prime_sc) << 5;
218 if (short_preamble) 218 if (short_preamble)
219 reg_tmp |= 0x80; 219 reg_tmp |= 0x80;
@@ -224,7 +224,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
224 u8 min_spacing_to_set; 224 u8 min_spacing_to_set;
225 u8 sec_min_space; 225 u8 sec_min_space;
226 226
227 min_spacing_to_set = *((u8 *) val); 227 min_spacing_to_set = *val;
228 if (min_spacing_to_set <= 7) { 228 if (min_spacing_to_set <= 7) {
229 sec_min_space = 0; 229 sec_min_space = 0;
230 230
@@ -248,7 +248,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
248 case HW_VAR_SHORTGI_DENSITY:{ 248 case HW_VAR_SHORTGI_DENSITY:{
249 u8 density_to_set; 249 u8 density_to_set;
250 250
251 density_to_set = *((u8 *) val); 251 density_to_set = *val;
252 mac->min_space_cfg |= (density_to_set << 3); 252 mac->min_space_cfg |= (density_to_set << 3);
253 253
254 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 254 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -272,7 +272,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
272 else 272 else
273 p_regtoset = regtoset_normal; 273 p_regtoset = regtoset_normal;
274 274
275 factor_toset = *((u8 *) val); 275 factor_toset = *val;
276 if (factor_toset <= 3) { 276 if (factor_toset <= 3) {
277 factor_toset = (1 << (factor_toset + 2)); 277 factor_toset = (1 << (factor_toset + 2));
278 if (factor_toset > 0xf) 278 if (factor_toset > 0xf)
@@ -303,16 +303,15 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
303 } 303 }
304 break; } 304 break; }
305 case HW_VAR_AC_PARAM:{ 305 case HW_VAR_AC_PARAM:{
306 u8 e_aci = *((u8 *) val); 306 u8 e_aci = *val;
307 rtl8723ae_dm_init_edca_turbo(hw); 307 rtl8723_dm_init_edca_turbo(hw);
308 308
309 if (rtlpci->acm_method != eAcmWay2_SW) 309 if (rtlpci->acm_method != EACMWAY2_SW)
310 rtlpriv->cfg->ops->set_hw_reg(hw, 310 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
311 HW_VAR_ACM_CTRL, 311 &e_aci);
312 (u8 *) (&e_aci));
313 break; } 312 break; }
314 case HW_VAR_ACM_CTRL:{ 313 case HW_VAR_ACM_CTRL:{
315 u8 e_aci = *((u8 *) val); 314 u8 e_aci = *val;
316 union aci_aifsn *p_aci_aifsn = 315 union aci_aifsn *p_aci_aifsn =
317 (union aci_aifsn *)(&(mac->ac[0].aifs)); 316 (union aci_aifsn *)(&(mac->ac[0].aifs));
318 u8 acm = p_aci_aifsn->f.acm; 317 u8 acm = p_aci_aifsn->f.acm;
@@ -365,7 +364,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
365 rtlpci->receive_config = ((u32 *) (val))[0]; 364 rtlpci->receive_config = ((u32 *) (val))[0];
366 break; 365 break;
367 case HW_VAR_RETRY_LIMIT:{ 366 case HW_VAR_RETRY_LIMIT:{
368 u8 retry_limit = ((u8 *) (val))[0]; 367 u8 retry_limit = *val;
369 368
370 rtl_write_word(rtlpriv, REG_RL, 369 rtl_write_word(rtlpriv, REG_RL,
371 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 370 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -378,13 +377,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
378 rtlefuse->efuse_usedbytes = *((u16 *) val); 377 rtlefuse->efuse_usedbytes = *((u16 *) val);
379 break; 378 break;
380 case HW_VAR_EFUSE_USAGE: 379 case HW_VAR_EFUSE_USAGE:
381 rtlefuse->efuse_usedpercentage = *((u8 *) val); 380 rtlefuse->efuse_usedpercentage = *val;
382 break; 381 break;
383 case HW_VAR_IO_CMD: 382 case HW_VAR_IO_CMD:
384 rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val)); 383 rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
385 break; 384 break;
386 case HW_VAR_WPA_CONFIG: 385 case HW_VAR_WPA_CONFIG:
387 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 386 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
388 break; 387 break;
389 case HW_VAR_SET_RPWM:{ 388 case HW_VAR_SET_RPWM:{
390 u8 rpwm_val; 389 u8 rpwm_val;
@@ -393,27 +392,25 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
393 udelay(1); 392 udelay(1);
394 393
395 if (rpwm_val & BIT(7)) { 394 if (rpwm_val & BIT(7)) {
396 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 395 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
397 (*(u8 *) val));
398 } else { 396 } else {
399 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 397 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
400 ((*(u8 *) val) | BIT(7)));
401 } 398 }
402 399
403 break; } 400 break; }
404 case HW_VAR_H2C_FW_PWRMODE:{ 401 case HW_VAR_H2C_FW_PWRMODE:{
405 u8 psmode = (*(u8 *) val); 402 u8 psmode = *val;
406 403
407 if (psmode != FW_PS_ACTIVE_MODE) 404 if (psmode != FW_PS_ACTIVE_MODE)
408 rtl8723ae_dm_rf_saving(hw, true); 405 rtl8723ae_dm_rf_saving(hw, true);
409 406
410 rtl8723ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); 407 rtl8723ae_set_fw_pwrmode_cmd(hw, *val);
411 break; } 408 break; }
412 case HW_VAR_FW_PSMODE_STATUS: 409 case HW_VAR_FW_PSMODE_STATUS:
413 ppsc->fw_current_inpsmode = *((bool *) val); 410 ppsc->fw_current_inpsmode = *((bool *) val);
414 break; 411 break;
415 case HW_VAR_H2C_FW_JOINBSSRPT:{ 412 case HW_VAR_H2C_FW_JOINBSSRPT:{
416 u8 mstatus = (*(u8 *) val); 413 u8 mstatus = *val;
417 u8 tmp_regcr, tmp_reg422; 414 u8 tmp_regcr, tmp_reg422;
418 bool recover = false; 415 bool recover = false;
419 416
@@ -446,11 +443,11 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
446 rtl_write_byte(rtlpriv, REG_CR + 1, 443 rtl_write_byte(rtlpriv, REG_CR + 1,
447 (tmp_regcr & ~(BIT(0)))); 444 (tmp_regcr & ~(BIT(0))));
448 } 445 }
449 rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 446 rtl8723ae_set_fw_joinbss_report_cmd(hw, *val);
450 447
451 break; } 448 break; }
452 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: 449 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
453 rtl8723ae_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); 450 rtl8723ae_set_p2p_ps_offload_cmd(hw, *val);
454 break; 451 break;
455 case HW_VAR_AID:{ 452 case HW_VAR_AID:{
456 u16 u2btmp; 453 u16 u2btmp;
@@ -460,7 +457,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
460 mac->assoc_id)); 457 mac->assoc_id));
461 break; } 458 break; }
462 case HW_VAR_CORRECT_TSF:{ 459 case HW_VAR_CORRECT_TSF:{
463 u8 btype_ibss = ((u8 *) (val))[0]; 460 u8 btype_ibss = *val;
464 461
465 if (btype_ibss == true) 462 if (btype_ibss == true)
466 _rtl8723ae_stop_tx_beacon(hw); 463 _rtl8723ae_stop_tx_beacon(hw);
@@ -490,20 +487,18 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
490 (u8 *)(&fw_current_inps)); 487 (u8 *)(&fw_current_inps));
491 rtlpriv->cfg->ops->set_hw_reg(hw, 488 rtlpriv->cfg->ops->set_hw_reg(hw,
492 HW_VAR_H2C_FW_PWRMODE, 489 HW_VAR_H2C_FW_PWRMODE,
493 (u8 *)(&ppsc->fwctrl_psmode)); 490 &ppsc->fwctrl_psmode);
494 491
495 rtlpriv->cfg->ops->set_hw_reg(hw, 492 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
496 HW_VAR_SET_RPWM, 493 &rpwm_val);
497 (u8 *)(&rpwm_val));
498 } else { 494 } else {
499 rpwm_val = 0x0C; /* RF on */ 495 rpwm_val = 0x0C; /* RF on */
500 fw_pwrmode = FW_PS_ACTIVE_MODE; 496 fw_pwrmode = FW_PS_ACTIVE_MODE;
501 fw_current_inps = false; 497 fw_current_inps = false;
502 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 498 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
503 (u8 *)(&rpwm_val)); 499 &rpwm_val);
504 rtlpriv->cfg->ops->set_hw_reg(hw, 500 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
505 HW_VAR_H2C_FW_PWRMODE, 501 &fw_pwrmode);
506 (u8 *)(&fw_pwrmode));
507 502
508 rtlpriv->cfg->ops->set_hw_reg(hw, 503 rtlpriv->cfg->ops->set_hw_reg(hw,
509 HW_VAR_FW_PSMODE_STATUS, 504 HW_VAR_FW_PSMODE_STATUS,
@@ -880,23 +875,33 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
880 bool rtstatus = true; 875 bool rtstatus = true;
881 int err; 876 int err;
882 u8 tmp_u1b; 877 u8 tmp_u1b;
878 unsigned long flags;
883 879
884 rtlpriv->rtlhal.being_init_adapter = true; 880 rtlpriv->rtlhal.being_init_adapter = true;
881 /* As this function can take a very long time (up to 350 ms)
882 * and can be called with irqs disabled, reenable the irqs
883 * to let the other devices continue being serviced.
884 *
885 * It is safe doing so since our own interrupts will only be enabled
886 * in a subsequent step.
887 */
888 local_save_flags(flags);
889 local_irq_enable();
890
885 rtlpriv->intf_ops->disable_aspm(hw); 891 rtlpriv->intf_ops->disable_aspm(hw);
886 rtstatus = _rtl8712e_init_mac(hw); 892 rtstatus = _rtl8712e_init_mac(hw);
887 if (rtstatus != true) { 893 if (rtstatus != true) {
888 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); 894 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
889 err = 1; 895 err = 1;
890 return err; 896 goto exit;
891 } 897 }
892 898
893 err = rtl8723ae_download_fw(hw); 899 err = rtl8723_download_fw(hw, false);
894 if (err) { 900 if (err) {
895 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 901 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
896 "Failed to download FW. Init HW without FW now..\n"); 902 "Failed to download FW. Init HW without FW now..\n");
897 err = 1; 903 err = 1;
898 rtlhal->fw_ready = false; 904 goto exit;
899 return err;
900 } else { 905 } else {
901 rtlhal->fw_ready = true; 906 rtlhal->fw_ready = true;
902 } 907 }
@@ -971,6 +976,8 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
971 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); 976 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
972 } 977 }
973 rtl8723ae_dm_init(hw); 978 rtl8723ae_dm_init(hw);
979exit:
980 local_irq_restore(flags);
974 rtlpriv->rtlhal.being_init_adapter = false; 981 rtlpriv->rtlhal.being_init_adapter = false;
975 return err; 982 return err;
976} 983}
@@ -1112,12 +1119,13 @@ static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
1112void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1119void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1113{ 1120{
1114 struct rtl_priv *rtlpriv = rtl_priv(hw); 1121 struct rtl_priv *rtlpriv = rtl_priv(hw);
1115 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1122 u32 reg_rcr;
1116 u32 reg_rcr = rtlpci->receive_config;
1117 1123
1118 if (rtlpriv->psc.rfpwr_state != ERFON) 1124 if (rtlpriv->psc.rfpwr_state != ERFON)
1119 return; 1125 return;
1120 1126
1127 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1128
1121 if (check_bssid == true) { 1129 if (check_bssid == true) {
1122 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1130 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1123 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1131 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1153,7 +1161,7 @@ void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci)
1153{ 1161{
1154 struct rtl_priv *rtlpriv = rtl_priv(hw); 1162 struct rtl_priv *rtlpriv = rtl_priv(hw);
1155 1163
1156 rtl8723ae_dm_init_edca_turbo(hw); 1164 rtl8723_dm_init_edca_turbo(hw);
1157 switch (aci) { 1165 switch (aci) {
1158 case AC1_BK: 1166 case AC1_BK:
1159 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); 1167 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
@@ -1614,10 +1622,10 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1614 rtl8723ae_read_bt_coexist_info_from_hwpg(hw, 1622 rtl8723ae_read_bt_coexist_info_from_hwpg(hw,
1615 rtlefuse->autoload_failflag, hwinfo); 1623 rtlefuse->autoload_failflag, hwinfo);
1616 1624
1617 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1625 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
1618 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 1626 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1619 rtlefuse->txpwr_fromeprom = true; 1627 rtlefuse->txpwr_fromeprom = true;
1620 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1628 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
1621 1629
1622 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1630 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1623 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); 1631 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1655,7 +1663,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1655 CHK_SVID_SMID(0x10EC, 0x9185)) 1663 CHK_SVID_SMID(0x10EC, 0x9185))
1656 rtlhal->oem_id = RT_CID_TOSHIBA; 1664 rtlhal->oem_id = RT_CID_TOSHIBA;
1657 else if (rtlefuse->eeprom_svid == 0x1025) 1665 else if (rtlefuse->eeprom_svid == 0x1025)
1658 rtlhal->oem_id = RT_CID_819x_Acer; 1666 rtlhal->oem_id = RT_CID_819X_ACER;
1659 else if (CHK_SVID_SMID(0x10EC, 0x6191) || 1667 else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
1660 CHK_SVID_SMID(0x10EC, 0x6192) || 1668 CHK_SVID_SMID(0x10EC, 0x6192) ||
1661 CHK_SVID_SMID(0x10EC, 0x6193) || 1669 CHK_SVID_SMID(0x10EC, 0x6193) ||
@@ -1665,7 +1673,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1665 CHK_SVID_SMID(0x10EC, 0x8191) || 1673 CHK_SVID_SMID(0x10EC, 0x8191) ||
1666 CHK_SVID_SMID(0x10EC, 0x8192) || 1674 CHK_SVID_SMID(0x10EC, 0x8192) ||
1667 CHK_SVID_SMID(0x10EC, 0x8193)) 1675 CHK_SVID_SMID(0x10EC, 0x8193))
1668 rtlhal->oem_id = RT_CID_819x_SAMSUNG; 1676 rtlhal->oem_id = RT_CID_819X_SAMSUNG;
1669 else if (CHK_SVID_SMID(0x10EC, 0x8195) || 1677 else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
1670 CHK_SVID_SMID(0x10EC, 0x9195) || 1678 CHK_SVID_SMID(0x10EC, 0x9195) ||
1671 CHK_SVID_SMID(0x10EC, 0x7194) || 1679 CHK_SVID_SMID(0x10EC, 0x7194) ||
@@ -1673,24 +1681,24 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1673 CHK_SVID_SMID(0x10EC, 0x8201) || 1681 CHK_SVID_SMID(0x10EC, 0x8201) ||
1674 CHK_SVID_SMID(0x10EC, 0x8202) || 1682 CHK_SVID_SMID(0x10EC, 0x8202) ||
1675 CHK_SVID_SMID(0x10EC, 0x9200)) 1683 CHK_SVID_SMID(0x10EC, 0x9200))
1676 rtlhal->oem_id = RT_CID_819x_Lenovo; 1684 rtlhal->oem_id = RT_CID_819X_LENOVO;
1677 else if (CHK_SVID_SMID(0x10EC, 0x8197) || 1685 else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
1678 CHK_SVID_SMID(0x10EC, 0x9196)) 1686 CHK_SVID_SMID(0x10EC, 0x9196))
1679 rtlhal->oem_id = RT_CID_819x_CLEVO; 1687 rtlhal->oem_id = RT_CID_819X_CLEVO;
1680 else if (CHK_SVID_SMID(0x1028, 0x8194) || 1688 else if (CHK_SVID_SMID(0x1028, 0x8194) ||
1681 CHK_SVID_SMID(0x1028, 0x8198) || 1689 CHK_SVID_SMID(0x1028, 0x8198) ||
1682 CHK_SVID_SMID(0x1028, 0x9197) || 1690 CHK_SVID_SMID(0x1028, 0x9197) ||
1683 CHK_SVID_SMID(0x1028, 0x9198)) 1691 CHK_SVID_SMID(0x1028, 0x9198))
1684 rtlhal->oem_id = RT_CID_819x_DELL; 1692 rtlhal->oem_id = RT_CID_819X_DELL;
1685 else if (CHK_SVID_SMID(0x103C, 0x1629)) 1693 else if (CHK_SVID_SMID(0x103C, 0x1629))
1686 rtlhal->oem_id = RT_CID_819x_HP; 1694 rtlhal->oem_id = RT_CID_819X_HP;
1687 else if (CHK_SVID_SMID(0x1A32, 0x2315)) 1695 else if (CHK_SVID_SMID(0x1A32, 0x2315))
1688 rtlhal->oem_id = RT_CID_819x_QMI; 1696 rtlhal->oem_id = RT_CID_819X_QMI;
1689 else if (CHK_SVID_SMID(0x10EC, 0x8203)) 1697 else if (CHK_SVID_SMID(0x10EC, 0x8203))
1690 rtlhal->oem_id = RT_CID_819x_PRONETS; 1698 rtlhal->oem_id = RT_CID_819X_PRONETS;
1691 else if (CHK_SVID_SMID(0x1043, 0x84B5)) 1699 else if (CHK_SVID_SMID(0x1043, 0x84B5))
1692 rtlhal->oem_id = 1700 rtlhal->oem_id =
1693 RT_CID_819x_Edimax_ASUS; 1701 RT_CID_819X_EDIMAX_ASUS;
1694 else 1702 else
1695 rtlhal->oem_id = RT_CID_DEFAULT; 1703 rtlhal->oem_id = RT_CID_DEFAULT;
1696 } else if (rtlefuse->eeprom_did == 0x8178) { 1704 } else if (rtlefuse->eeprom_did == 0x8178) {
@@ -1712,12 +1720,12 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1712 CHK_SVID_SMID(0x10EC, 0x9185)) 1720 CHK_SVID_SMID(0x10EC, 0x9185))
1713 rtlhal->oem_id = RT_CID_TOSHIBA; 1721 rtlhal->oem_id = RT_CID_TOSHIBA;
1714 else if (rtlefuse->eeprom_svid == 0x1025) 1722 else if (rtlefuse->eeprom_svid == 0x1025)
1715 rtlhal->oem_id = RT_CID_819x_Acer; 1723 rtlhal->oem_id = RT_CID_819X_ACER;
1716 else if (CHK_SVID_SMID(0x10EC, 0x8186)) 1724 else if (CHK_SVID_SMID(0x10EC, 0x8186))
1717 rtlhal->oem_id = RT_CID_819x_PRONETS; 1725 rtlhal->oem_id = RT_CID_819X_PRONETS;
1718 else if (CHK_SVID_SMID(0x1043, 0x8486)) 1726 else if (CHK_SVID_SMID(0x1043, 0x8486))
1719 rtlhal->oem_id = 1727 rtlhal->oem_id =
1720 RT_CID_819x_Edimax_ASUS; 1728 RT_CID_819X_EDIMAX_ASUS;
1721 else 1729 else
1722 rtlhal->oem_id = RT_CID_DEFAULT; 1730 rtlhal->oem_id = RT_CID_DEFAULT;
1723 } else { 1731 } else {
@@ -1731,7 +1739,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1731 rtlhal->oem_id = RT_CID_CCX; 1739 rtlhal->oem_id = RT_CID_CCX;
1732 break; 1740 break;
1733 case EEPROM_CID_QMI: 1741 case EEPROM_CID_QMI:
1734 rtlhal->oem_id = RT_CID_819x_QMI; 1742 rtlhal->oem_id = RT_CID_819X_QMI;
1735 break; 1743 break;
1736 case EEPROM_CID_WHQL: 1744 case EEPROM_CID_WHQL:
1737 break; 1745 break;
@@ -2037,8 +2045,7 @@ void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw)
2037 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 2045 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2038 u16 sifs_timer; 2046 u16 sifs_timer;
2039 2047
2040 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2048 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
2041 (u8 *)&mac->slot_time);
2042 if (!mac->ht_enable) 2049 if (!mac->ht_enable)
2043 sifs_timer = 0x0a0a; 2050 sifs_timer = 0x0a0a;
2044 else 2051 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index 5d318a85eda4..3ea78afdec73 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -30,12 +30,14 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../ps.h" 32#include "../ps.h"
33#include "../core.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
36#include "rf.h" 37#include "rf.h"
37#include "dm.h" 38#include "dm.h"
38#include "table.h" 39#include "table.h"
40#include "../rtl8723com/phy_common.h"
39 41
40/* static forward definitions */ 42/* static forward definitions */
41static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw, 43static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
@@ -43,72 +45,17 @@ static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
43static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw, 45static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
44 enum radio_path rfpath, 46 enum radio_path rfpath,
45 u32 offset, u32 data); 47 u32 offset, u32 data);
46static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
47 enum radio_path rfpath, u32 offset);
48static void _phy_rf_serial_write(struct ieee80211_hw *hw,
49 enum radio_path rfpath, u32 offset, u32 data);
50static u32 _phy_calculate_bit_shift(u32 bitmask);
51static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw); 48static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
52static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw); 49static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw);
53static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype); 50static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype);
54static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype); 51static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype);
55static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
56static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
57 u32 cmdtableidx, u32 cmdtablesz,
58 enum swchnlcmd_id cmdid,
59 u32 para1, u32 para2,
60 u32 msdelay);
61static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, 52static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
62 u8 *stage, u8 *step, u32 *delay); 53 u8 *stage, u8 *step, u32 *delay);
63static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, 54static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
64 enum wireless_mode wirelessmode, 55 enum wireless_mode wirelessmode,
65 long power_indbm); 56 long power_indbm);
66static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
67 enum wireless_mode wirelessmode, u8 txpwridx);
68static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw); 57static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw);
69 58
70u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
71 u32 bitmask)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 u32 returnvalue, originalvalue, bitshift;
75
76 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
77 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
78 originalvalue = rtl_read_dword(rtlpriv, regaddr);
79 bitshift = _phy_calculate_bit_shift(bitmask);
80 returnvalue = (originalvalue & bitmask) >> bitshift;
81
82 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
83 "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr,
84 originalvalue);
85
86 return returnvalue;
87}
88
89void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
90 u32 regaddr, u32 bitmask, u32 data)
91{
92 struct rtl_priv *rtlpriv = rtl_priv(hw);
93 u32 originalvalue, bitshift;
94
95 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
96 "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr,
97 bitmask, data);
98
99 if (bitmask != MASKDWORD) {
100 originalvalue = rtl_read_dword(rtlpriv, regaddr);
101 bitshift = _phy_calculate_bit_shift(bitmask);
102 data = ((originalvalue & (~bitmask)) | (data << bitshift));
103 }
104
105 rtl_write_dword(rtlpriv, regaddr, data);
106
107 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
108 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
109 regaddr, bitmask, data);
110}
111
112u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, 59u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
113 enum radio_path rfpath, u32 regaddr, u32 bitmask) 60 enum radio_path rfpath, u32 regaddr, u32 bitmask)
114{ 61{
@@ -124,11 +71,11 @@ u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
124 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); 71 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
125 72
126 if (rtlphy->rf_mode != RF_OP_BY_FW) 73 if (rtlphy->rf_mode != RF_OP_BY_FW)
127 original_value = _phy_rf_serial_read(hw, rfpath, regaddr); 74 original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
128 else 75 else
129 original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr); 76 original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr);
130 77
131 bitshift = _phy_calculate_bit_shift(bitmask); 78 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
132 readback_value = (original_value & bitmask) >> bitshift; 79 readback_value = (original_value & bitmask) >> bitshift;
133 80
134 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); 81 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -157,19 +104,19 @@ void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
157 104
158 if (rtlphy->rf_mode != RF_OP_BY_FW) { 105 if (rtlphy->rf_mode != RF_OP_BY_FW) {
159 if (bitmask != RFREG_OFFSET_MASK) { 106 if (bitmask != RFREG_OFFSET_MASK) {
160 original_value = _phy_rf_serial_read(hw, rfpath, 107 original_value = rtl8723_phy_rf_serial_read(hw, rfpath,
161 regaddr); 108 regaddr);
162 bitshift = _phy_calculate_bit_shift(bitmask); 109 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
163 data = ((original_value & (~bitmask)) | 110 data = ((original_value & (~bitmask)) |
164 (data << bitshift)); 111 (data << bitshift));
165 } 112 }
166 113
167 _phy_rf_serial_write(hw, rfpath, regaddr, data); 114 rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
168 } else { 115 } else {
169 if (bitmask != RFREG_OFFSET_MASK) { 116 if (bitmask != RFREG_OFFSET_MASK) {
170 original_value = _phy_fw_rf_serial_read(hw, rfpath, 117 original_value = _phy_fw_rf_serial_read(hw, rfpath,
171 regaddr); 118 regaddr);
172 bitshift = _phy_calculate_bit_shift(bitmask); 119 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
173 data = ((original_value & (~bitmask)) | 120 data = ((original_value & (~bitmask)) |
174 (data << bitshift)); 121 (data << bitshift));
175 } 122 }
@@ -197,87 +144,6 @@ static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
197 RT_ASSERT(false, "deprecated!\n"); 144 RT_ASSERT(false, "deprecated!\n");
198} 145}
199 146
200static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
201 enum radio_path rfpath, u32 offset)
202{
203 struct rtl_priv *rtlpriv = rtl_priv(hw);
204 struct rtl_phy *rtlphy = &(rtlpriv->phy);
205 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
206 u32 newoffset;
207 u32 tmplong, tmplong2;
208 u8 rfpi_enable = 0;
209 u32 retvalue;
210
211 offset &= 0x3f;
212 newoffset = offset;
213 if (RT_CANNOT_IO(hw)) {
214 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
215 return 0xFFFFFFFF;
216 }
217 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
218 if (rfpath == RF90_PATH_A)
219 tmplong2 = tmplong;
220 else
221 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
222 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
223 (newoffset << 23) | BLSSIREADEDGE;
224 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
225 tmplong & (~BLSSIREADEDGE));
226 mdelay(1);
227 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
228 mdelay(1);
229 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
230 tmplong | BLSSIREADEDGE);
231 mdelay(1);
232 if (rfpath == RF90_PATH_A)
233 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
234 BIT(8));
235 else if (rfpath == RF90_PATH_B)
236 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
237 BIT(8));
238 if (rfpi_enable)
239 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
240 BLSSIREADBACKDATA);
241 else
242 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
243 BLSSIREADBACKDATA);
244 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
245 rfpath, pphyreg->rf_rb, retvalue);
246 return retvalue;
247}
248
249static void _phy_rf_serial_write(struct ieee80211_hw *hw,
250 enum radio_path rfpath, u32 offset, u32 data)
251{
252 u32 data_and_addr;
253 u32 newoffset;
254 struct rtl_priv *rtlpriv = rtl_priv(hw);
255 struct rtl_phy *rtlphy = &(rtlpriv->phy);
256 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
257
258 if (RT_CANNOT_IO(hw)) {
259 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
260 return;
261 }
262 offset &= 0x3f;
263 newoffset = offset;
264 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
265 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
266 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
267 rfpath, pphyreg->rf3wire_offset, data_and_addr);
268}
269
270static u32 _phy_calculate_bit_shift(u32 bitmask)
271{
272 u32 i;
273
274 for (i = 0; i <= 31; i++) {
275 if (((bitmask >> i) & 0x1) == 1)
276 break;
277 }
278 return i;
279}
280
281static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw) 147static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw)
282{ 148{
283 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2); 149 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
@@ -307,7 +173,7 @@ bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw)
307 u8 tmpu1b; 173 u8 tmpu1b;
308 u8 reg_hwparafile = 1; 174 u8 reg_hwparafile = 1;
309 175
310 _phy_init_bb_rf_reg_def(hw); 176 rtl8723_phy_init_bb_rf_reg_def(hw);
311 177
312 /* 1. 0x28[1] = 1 */ 178 /* 1. 0x28[1] = 1 */
313 tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL); 179 tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL);
@@ -412,18 +278,7 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype)
412 phy_regarray_table = RTL8723EPHY_REG_1TARRAY; 278 phy_regarray_table = RTL8723EPHY_REG_1TARRAY;
413 if (configtype == BASEBAND_CONFIG_PHY_REG) { 279 if (configtype == BASEBAND_CONFIG_PHY_REG) {
414 for (i = 0; i < phy_reg_arraylen; i = i + 2) { 280 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
415 if (phy_regarray_table[i] == 0xfe) 281 rtl_addr_delay(phy_regarray_table[i]);
416 mdelay(50);
417 else if (phy_regarray_table[i] == 0xfd)
418 mdelay(5);
419 else if (phy_regarray_table[i] == 0xfc)
420 mdelay(1);
421 else if (phy_regarray_table[i] == 0xfb)
422 udelay(50);
423 else if (phy_regarray_table[i] == 0xfa)
424 udelay(5);
425 else if (phy_regarray_table[i] == 0xf9)
426 udelay(1);
427 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD, 282 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
428 phy_regarray_table[i + 1]); 283 phy_regarray_table[i + 1]);
429 udelay(1); 284 udelay(1);
@@ -585,18 +440,7 @@ static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype)
585 440
586 if (configtype == BASEBAND_CONFIG_PHY_REG) { 441 if (configtype == BASEBAND_CONFIG_PHY_REG) {
587 for (i = 0; i < phy_regarray_pg_len; i = i + 3) { 442 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
588 if (phy_regarray_table_pg[i] == 0xfe) 443 rtl_addr_delay(phy_regarray_table_pg[i]);
589 mdelay(50);
590 else if (phy_regarray_table_pg[i] == 0xfd)
591 mdelay(5);
592 else if (phy_regarray_table_pg[i] == 0xfc)
593 mdelay(1);
594 else if (phy_regarray_table_pg[i] == 0xfb)
595 udelay(50);
596 else if (phy_regarray_table_pg[i] == 0xfa)
597 udelay(5);
598 else if (phy_regarray_table_pg[i] == 0xf9)
599 udelay(1);
600 444
601 _st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i], 445 _st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i],
602 phy_regarray_table_pg[i + 1], 446 phy_regarray_table_pg[i + 1],
@@ -623,24 +467,9 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
623 switch (rfpath) { 467 switch (rfpath) {
624 case RF90_PATH_A: 468 case RF90_PATH_A:
625 for (i = 0; i < radioa_arraylen; i = i + 2) { 469 for (i = 0; i < radioa_arraylen; i = i + 2) {
626 if (radioa_array_table[i] == 0xfe) 470 rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
627 mdelay(50); 471 RFREG_OFFSET_MASK,
628 else if (radioa_array_table[i] == 0xfd) 472 radioa_array_table[i + 1]);
629 mdelay(5);
630 else if (radioa_array_table[i] == 0xfc)
631 mdelay(1);
632 else if (radioa_array_table[i] == 0xfb)
633 udelay(50);
634 else if (radioa_array_table[i] == 0xfa)
635 udelay(5);
636 else if (radioa_array_table[i] == 0xf9)
637 udelay(1);
638 else {
639 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
640 RFREG_OFFSET_MASK,
641 radioa_array_table[i + 1]);
642 udelay(1);
643 }
644 } 473 }
645 break; 474 break;
646 case RF90_PATH_B: 475 case RF90_PATH_B:
@@ -690,92 +519,6 @@ void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
690 ROFDM0_RXDETECTOR3, rtlphy->framesync); 519 ROFDM0_RXDETECTOR3, rtlphy->framesync);
691} 520}
692 521
693static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
694{
695 struct rtl_priv *rtlpriv = rtl_priv(hw);
696 struct rtl_phy *rtlphy = &(rtlpriv->phy);
697
698 rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
699 rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
700 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
701 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
702
703 rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
704 rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
705 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
706 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
707
708 rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
709 rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
710
711 rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
712 rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
713
714 rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
715 RFPGA0_XA_LSSIPARAMETER;
716 rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
717 RFPGA0_XB_LSSIPARAMETER;
718
719 rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
720 rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
721 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
722 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
723
724 rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
725 rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
726 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
727 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
728
729 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
730 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
731
732 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
733 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
734
735 rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
736 rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
737 rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
738 rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
739
740 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
741 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
742 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
743 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
744
745 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
746 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
747 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
748 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
749
750 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
751 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
752 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
753 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
754
755 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
756 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
757 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
758 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
759
760 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
761 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
762 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
763 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
764
765 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
766 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
767 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
768 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
769
770 rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
771 rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
772 rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
773 rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
774
775 rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
776 rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
777}
778
779void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) 522void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
780{ 523{
781 struct rtl_priv *rtlpriv = rtl_priv(hw); 524 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -785,17 +528,17 @@ void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
785 long txpwr_dbm; 528 long txpwr_dbm;
786 529
787 txpwr_level = rtlphy->cur_cck_txpwridx; 530 txpwr_level = rtlphy->cur_cck_txpwridx;
788 txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level); 531 txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
789 txpwr_level = rtlphy->cur_ofdm24g_txpwridx + 532 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
790 rtlefuse->legacy_ht_txpowerdiff; 533 rtlefuse->legacy_ht_txpowerdiff;
791 if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm) 534 if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
792 txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, 535 txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
793 txpwr_level); 536 txpwr_level);
794 txpwr_level = rtlphy->cur_ofdm24g_txpwridx; 537 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
795 if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) > 538 if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
796 txpwr_dbm) 539 txpwr_dbm)
797 txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, 540 txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
798 txpwr_level); 541 txpwr_level);
799 *powerlevel = txpwr_dbm; 542 *powerlevel = txpwr_dbm;
800} 543}
801 544
@@ -912,28 +655,6 @@ static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
912 return txpwridx; 655 return txpwridx;
913} 656}
914 657
915static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
916 enum wireless_mode wirelessmode, u8 txpwridx)
917{
918 long offset;
919 long pwrout_dbm;
920
921 switch (wirelessmode) {
922 case WIRELESS_MODE_B:
923 offset = -7;
924 break;
925 case WIRELESS_MODE_G:
926 case WIRELESS_MODE_N_24G:
927 offset = -8;
928 break;
929 default:
930 offset = -8;
931 break;
932 }
933 pwrout_dbm = txpwridx / 2 + offset;
934 return pwrout_dbm;
935}
936
937void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) 658void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
938{ 659{
939 struct rtl_priv *rtlpriv = rtl_priv(hw); 660 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1117,26 +838,26 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
1117 u8 num_total_rfpath = rtlphy->num_total_rfpath; 838 u8 num_total_rfpath = rtlphy->num_total_rfpath;
1118 839
1119 precommoncmdcnt = 0; 840 precommoncmdcnt = 0;
1120 _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, 841 rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1121 MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, 842 MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
1122 0, 0, 0); 843 0, 0, 0);
1123 _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, 844 rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1124 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); 845 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
1125 postcommoncmdcnt = 0; 846 postcommoncmdcnt = 0;
1126 847
1127 _phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, 848 rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
1128 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); 849 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
1129 rfdependcmdcnt = 0; 850 rfdependcmdcnt = 0;
1130 851
1131 RT_ASSERT((channel >= 1 && channel <= 14), 852 RT_ASSERT((channel >= 1 && channel <= 14),
1132 "illegal channel for Zebra: %d\n", channel); 853 "illegal channel for Zebra: %d\n", channel);
1133 854
1134 _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, 855 rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1135 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, 856 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
1136 RF_CHNLBW, channel, 10); 857 RF_CHNLBW, channel, 10);
1137 858
1138 _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, 859 rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1139 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0); 860 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
1140 861
1141 do { 862 do {
1142 switch (*stage) { 863 switch (*stage) {
@@ -1204,29 +925,6 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
1204 return false; 925 return false;
1205} 926}
1206 927
1207static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
1208 u32 cmdtableidx, u32 cmdtablesz,
1209 enum swchnlcmd_id cmdid, u32 para1,
1210 u32 para2, u32 msdelay)
1211{
1212 struct swchnlcmd *pcmd;
1213
1214 if (cmdtable == NULL) {
1215 RT_ASSERT(false, "cmdtable cannot be NULL.\n");
1216 return false;
1217 }
1218
1219 if (cmdtableidx >= cmdtablesz)
1220 return false;
1221
1222 pcmd = cmdtable + cmdtableidx;
1223 pcmd->cmdid = cmdid;
1224 pcmd->para1 = para1;
1225 pcmd->para2 = para2;
1226 pcmd->msdelay = msdelay;
1227 return true;
1228}
1229
1230static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) 928static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
1231{ 929{
1232 u32 reg_eac, reg_e94, reg_e9c, reg_ea4; 930 u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
@@ -1297,136 +995,6 @@ static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw)
1297 return result; 995 return result;
1298} 996}
1299 997
1300static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok,
1301 long result[][8], u8 final_candidate,
1302 bool btxonly)
1303{
1304 u32 oldval_0, x, tx0_a, reg;
1305 long y, tx0_c;
1306
1307 if (final_candidate == 0xFF) {
1308 return;
1309 } else if (iqk_ok) {
1310 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1311 MASKDWORD) >> 22) & 0x3FF;
1312 x = result[final_candidate][0];
1313 if ((x & 0x00000200) != 0)
1314 x = x | 0xFFFFFC00;
1315 tx0_a = (x * oldval_0) >> 8;
1316 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
1317 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
1318 ((x * oldval_0 >> 7) & 0x1));
1319 y = result[final_candidate][1];
1320 if ((y & 0x00000200) != 0)
1321 y = y | 0xFFFFFC00;
1322 tx0_c = (y * oldval_0) >> 8;
1323 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
1324 ((tx0_c & 0x3C0) >> 6));
1325 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
1326 (tx0_c & 0x3F));
1327 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
1328 ((y * oldval_0 >> 7) & 0x1));
1329 if (btxonly)
1330 return;
1331 reg = result[final_candidate][2];
1332 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
1333 reg = result[final_candidate][3] & 0x3F;
1334 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
1335 reg = (result[final_candidate][3] >> 6) & 0xF;
1336 rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
1337 }
1338}
1339
1340static void phy_save_adda_regs(struct ieee80211_hw *hw,
1341 u32 *addareg, u32 *addabackup,
1342 u32 registernum)
1343{
1344 u32 i;
1345
1346 for (i = 0; i < registernum; i++)
1347 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1348}
1349
1350static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
1351 u32 *macbackup)
1352{
1353 struct rtl_priv *rtlpriv = rtl_priv(hw);
1354 u32 i;
1355
1356 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1357 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
1358 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
1359}
1360
1361static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg,
1362 u32 *addabackup, u32 regiesternum)
1363{
1364 u32 i;
1365
1366 for (i = 0; i < regiesternum; i++)
1367 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1368}
1369
1370static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
1371 u32 *macbackup)
1372{
1373 struct rtl_priv *rtlpriv = rtl_priv(hw);
1374 u32 i;
1375
1376 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1377 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
1378 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
1379}
1380
1381static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw,
1382 u32 *addareg, bool is_patha_on,
1383 bool is2t)
1384{
1385 u32 pathOn;
1386 u32 i;
1387
1388 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1389 if (false == is2t) {
1390 pathOn = 0x0bdb25a0;
1391 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1392 } else {
1393 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1394 }
1395
1396 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1397 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1398}
1399
1400static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw,
1401 u32 *macreg, u32 *macbackup)
1402{
1403 struct rtl_priv *rtlpriv = rtl_priv(hw);
1404 u32 i = 0;
1405
1406 rtl_write_byte(rtlpriv, macreg[i], 0x3F);
1407
1408 for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
1409 rtl_write_byte(rtlpriv, macreg[i],
1410 (u8) (macbackup[i] & (~BIT(3))));
1411 rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
1412}
1413
1414static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw)
1415{
1416 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
1417 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1418 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1419}
1420
1421static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1422{
1423 u32 mode;
1424
1425 mode = pi_mode ? 0x01000100 : 0x01000000;
1426 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1427 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1428}
1429
1430static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8], 998static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8],
1431 u8 c1, u8 c2) 999 u8 c1, u8 c2)
1432{ 1000{
@@ -1498,10 +1066,12 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
1498 const u32 retrycount = 2; 1066 const u32 retrycount = 2;
1499 1067
1500 if (t == 0) { 1068 if (t == 0) {
1501 phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16); 1069 rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup,
1502 phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); 1070 16);
1071 rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
1072 rtlphy->iqk_mac_backup);
1503 } 1073 }
1504 _rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t); 1074 rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
1505 if (t == 0) { 1075 if (t == 0) {
1506 rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, 1076 rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
1507 RFPGA0_XA_HSSIPARAMETER1, 1077 RFPGA0_XA_HSSIPARAMETER1,
@@ -1509,7 +1079,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
1509 } 1079 }
1510 1080
1511 if (!rtlphy->rfpi_enable) 1081 if (!rtlphy->rfpi_enable)
1512 _rtl8723ae_phy_pi_mode_switch(hw, true); 1082 rtl8723_phy_pi_mode_switch(hw, true);
1513 if (t == 0) { 1083 if (t == 0) {
1514 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD); 1084 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1515 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD); 1085 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
@@ -1522,7 +1092,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
1522 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); 1092 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1523 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000); 1093 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1524 } 1094 }
1525 _rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg, 1095 rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
1526 rtlphy->iqk_mac_backup); 1096 rtlphy->iqk_mac_backup);
1527 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000); 1097 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1528 if (is2t) 1098 if (is2t)
@@ -1552,8 +1122,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
1552 } 1122 }
1553 1123
1554 if (is2t) { 1124 if (is2t) {
1555 _rtl8723ae_phy_path_a_standby(hw); 1125 rtl8723_phy_path_a_standby(hw);
1556 _rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t); 1126 rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
1557 for (i = 0; i < retrycount; i++) { 1127 for (i = 0; i < retrycount; i++) {
1558 pathb_ok = _rtl8723ae_phy_path_b_iqk(hw); 1128 pathb_ok = _rtl8723ae_phy_path_b_iqk(hw);
1559 if (pathb_ok == 0x03) { 1129 if (pathb_ok == 0x03) {
@@ -1588,9 +1158,11 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
1588 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3); 1158 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1589 if (t != 0) { 1159 if (t != 0) {
1590 if (!rtlphy->rfpi_enable) 1160 if (!rtlphy->rfpi_enable)
1591 _rtl8723ae_phy_pi_mode_switch(hw, false); 1161 rtl8723_phy_pi_mode_switch(hw, false);
1592 phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16); 1162 rtl8723_phy_reload_adda_registers(hw, adda_reg,
1593 phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); 1163 rtlphy->adda_backup, 16);
1164 rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
1165 rtlphy->iqk_mac_backup);
1594 } 1166 }
1595} 1167}
1596 1168
@@ -1691,7 +1263,8 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
1691 }; 1263 };
1692 1264
1693 if (recovery) { 1265 if (recovery) {
1694 phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); 1266 rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
1267 rtlphy->iqk_bb_backup, 10);
1695 return; 1268 return;
1696 } 1269 }
1697 if (start_conttx || singletone) 1270 if (start_conttx || singletone)
@@ -1756,9 +1329,10 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
1756 rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; 1329 rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
1757 } 1330 }
1758 if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ 1331 if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
1759 phy_path_a_fill_iqk_matrix(hw, patha_ok, result, 1332 rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
1760 final_candidate, (reg_ea4 == 0)); 1333 final_candidate,
1761 phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); 1334 (reg_ea4 == 0));
1335 rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
1762} 1336}
1763 1337
1764void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw) 1338void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index 007ebdbbe108..cd43139ed332 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -76,23 +76,6 @@
76 76
77#define RTL92C_MAX_PATH_NUM 2 77#define RTL92C_MAX_PATH_NUM 2
78 78
79enum swchnlcmd_id {
80 CMDID_END,
81 CMDID_SET_TXPOWEROWER_LEVEL,
82 CMDID_BBREGWRITE10,
83 CMDID_WRITEPORT_ULONG,
84 CMDID_WRITEPORT_USHORT,
85 CMDID_WRITEPORT_UCHAR,
86 CMDID_RF_WRITEREG,
87};
88
89struct swchnlcmd {
90 enum swchnlcmd_id cmdid;
91 u32 para1;
92 u32 para2;
93 u32 msdelay;
94};
95
96enum hw90_block_e { 79enum hw90_block_e {
97 HW90_BLOCK_MAC = 0, 80 HW90_BLOCK_MAC = 0,
98 HW90_BLOCK_PHY0 = 1, 81 HW90_BLOCK_PHY0 = 1,
@@ -183,10 +166,6 @@ struct tx_power_struct {
183 u32 mcs_original_offset[4][16]; 166 u32 mcs_original_offset[4][16];
184}; 167};
185 168
186u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
187 u32 regaddr, u32 bitmask);
188void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
189 u32 regaddr, u32 bitmask, u32 data);
190u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, 169u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
191 enum radio_path rfpath, u32 regaddr, 170 enum radio_path rfpath, u32 regaddr,
192 u32 bitmask); 171 u32 bitmask);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
index 7a46f9fdf558..a418acb4d0ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
@@ -30,7 +30,6 @@
30#ifndef __RTL8723E_PWRSEQ_H__ 30#ifndef __RTL8723E_PWRSEQ_H__
31#define __RTL8723E_PWRSEQ_H__ 31#define __RTL8723E_PWRSEQ_H__
32 32
33#include "pwrseqcmd.h"
34/* 33/*
35 Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd 34 Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
36 There are 6 HW Power States: 35 There are 6 HW Power States:
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
index 199da366c6da..64376b38708b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
@@ -2059,22 +2059,6 @@
2059#define BWORD1 0xc 2059#define BWORD1 0xc
2060#define BWORD 0xf 2060#define BWORD 0xf
2061 2061
2062#define MASKBYTE0 0xff
2063#define MASKBYTE1 0xff00
2064#define MASKBYTE2 0xff0000
2065#define MASKBYTE3 0xff000000
2066#define MASKHWORD 0xffff0000
2067#define MASKLWORD 0x0000ffff
2068#define MASKDWORD 0xffffffff
2069#define MASK12BITS 0xfff
2070#define MASKH4BITS 0xf0000000
2071#define MASKOFDM_D 0xffc00000
2072#define MASKCCK 0x3f3f3f3f
2073
2074#define MASK4BITS 0x0f
2075#define MASK20BITS 0xfffff
2076#define RFREG_OFFSET_MASK 0xfffff
2077
2078#define BENABLE 0x1 2062#define BENABLE 0x1
2079#define BDISABLE 0x0 2063#define BDISABLE 0x0
2080 2064
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 62b204faf773..1087a3bd07fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -37,8 +37,11 @@
37#include "reg.h" 37#include "reg.h"
38#include "def.h" 38#include "def.h"
39#include "phy.h" 39#include "phy.h"
40#include "../rtl8723com/phy_common.h"
40#include "dm.h" 41#include "dm.h"
41#include "hw.h" 42#include "hw.h"
43#include "fw.h"
44#include "../rtl8723com/fw_common.h"
42#include "sw.h" 45#include "sw.h"
43#include "trx.h" 46#include "trx.h"
44#include "led.h" 47#include "led.h"
@@ -193,6 +196,11 @@ void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw)
193 } 196 }
194} 197}
195 198
199static bool is_fw_header(struct rtl92c_firmware_header *hdr)
200{
201 return (hdr->signature & 0xfff0) == 0x2300;
202}
203
196static struct rtl_hal_ops rtl8723ae_hal_ops = { 204static struct rtl_hal_ops rtl8723ae_hal_ops = {
197 .init_sw_vars = rtl8723ae_init_sw_vars, 205 .init_sw_vars = rtl8723ae_init_sw_vars,
198 .deinit_sw_vars = rtl8723ae_deinit_sw_vars, 206 .deinit_sw_vars = rtl8723ae_deinit_sw_vars,
@@ -231,13 +239,14 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
231 .set_key = rtl8723ae_set_key, 239 .set_key = rtl8723ae_set_key,
232 .init_sw_leds = rtl8723ae_init_sw_leds, 240 .init_sw_leds = rtl8723ae_init_sw_leds,
233 .allow_all_destaddr = rtl8723ae_allow_all_destaddr, 241 .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
234 .get_bbreg = rtl8723ae_phy_query_bb_reg, 242 .get_bbreg = rtl8723_phy_query_bb_reg,
235 .set_bbreg = rtl8723ae_phy_set_bb_reg, 243 .set_bbreg = rtl8723_phy_set_bb_reg,
236 .get_rfreg = rtl8723ae_phy_query_rf_reg, 244 .get_rfreg = rtl8723ae_phy_query_rf_reg,
237 .set_rfreg = rtl8723ae_phy_set_rf_reg, 245 .set_rfreg = rtl8723ae_phy_set_rf_reg,
238 .c2h_command_handle = rtl_8723e_c2h_command_handle, 246 .c2h_command_handle = rtl_8723e_c2h_command_handle,
239 .bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify, 247 .bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify,
240 .bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps, 248 .bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps,
249 .is_fw_header = is_fw_header,
241}; 250};
242 251
243static struct rtl_mod_params rtl8723ae_mod_params = { 252static struct rtl_mod_params rtl8723ae_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index 50b7be3f3a60..10b7577b6ae5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
334 /* during testing, hdr could be NULL here */ 334 /* during testing, hdr could be NULL here */
335 return false; 335 return false;
336 } 336 }
337 if ((ieee80211_is_robust_mgmt_frame(hdr)) && 337 if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
338 (ieee80211_has_protected(hdr->frame_control))) 338 (ieee80211_has_protected(hdr->frame_control)))
339 rx_status->flag &= ~RX_FLAG_DECRYPTED; 339 rx_status->flag &= ~RX_FLAG_DECRYPTED;
340 else 340 else
@@ -365,7 +365,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
365 365
366void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, 366void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
367 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 367 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
368 struct ieee80211_tx_info *info, 368 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
369 struct ieee80211_sta *sta, 369 struct ieee80211_sta *sta,
370 struct sk_buff *skb, u8 hw_queue, 370 struct sk_buff *skb, u8 hw_queue,
371 struct rtl_tcb_desc *ptcdesc) 371 struct rtl_tcb_desc *ptcdesc)
@@ -375,7 +375,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
375 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 375 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
376 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 376 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
377 bool defaultadapter = true; 377 bool defaultadapter = true;
378 u8 *pdesc = (u8 *) pdesc_tx; 378 u8 *pdesc = pdesc_tx;
379 u16 seq_number; 379 u16 seq_number;
380 __le16 fc = hdr->frame_control; 380 __le16 fc = hdr->frame_control;
381 u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue); 381 u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -577,7 +577,7 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
577 577
578 SET_TX_DESC_OWN(pdesc, 1); 578 SET_TX_DESC_OWN(pdesc, 1);
579 579
580 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 580 SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
581 581
582 SET_TX_DESC_FIRST_SEG(pdesc, 1); 582 SET_TX_DESC_FIRST_SEG(pdesc, 1);
583 SET_TX_DESC_LAST_SEG(pdesc, 1); 583 SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -597,7 +597,8 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
597 pdesc, TX_DESC_SIZE); 597 pdesc, TX_DESC_SIZE);
598} 598}
599 599
600void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) 600void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
601 u8 desc_name, u8 *val)
601{ 602{
602 if (istx == true) { 603 if (istx == true) {
603 switch (desc_name) { 604 switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
index ad05b54bc0f1..4380b7d3a91a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
@@ -521,12 +521,6 @@ do { \
521 memset(__pdesc, 0, _size); \ 521 memset(__pdesc, 0, _size); \
522} while (0) 522} while (0)
523 523
524#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
525 ((rxmcs) == DESC92_RATE1M || \
526 (rxmcs) == DESC92_RATE2M || \
527 (rxmcs) == DESC92_RATE5_5M || \
528 (rxmcs) == DESC92_RATE11M)
529
530struct rx_fwinfo_8723e { 524struct rx_fwinfo_8723e {
531 u8 gain_trsw[4]; 525 u8 gain_trsw[4];
532 u8 pwdb_all; 526 u8 pwdb_all;
@@ -706,8 +700,8 @@ struct rx_desc_8723e {
706} __packed; 700} __packed;
707 701
708void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, 702void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
709 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 703 struct ieee80211_hdr *hdr, u8 *pdesc,
710 struct ieee80211_tx_info *info, 704 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
711 struct ieee80211_sta *sta, 705 struct ieee80211_sta *sta,
712 struct sk_buff *skb, u8 hw_queue, 706 struct sk_buff *skb, u8 hw_queue,
713 struct rtl_tcb_desc *ptcb_desc); 707 struct rtl_tcb_desc *ptcb_desc);
@@ -715,7 +709,8 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
715 struct rtl_stats *status, 709 struct rtl_stats *status,
716 struct ieee80211_rx_status *rx_status, 710 struct ieee80211_rx_status *rx_status,
717 u8 *pdesc, struct sk_buff *skb); 711 u8 *pdesc, struct sk_buff *skb);
718void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); 712void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
713 u8 desc_name, u8 *val);
719u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name); 714u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
720void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); 715void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
721void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 716void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
new file mode 100644
index 000000000000..59e416abd93a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
@@ -0,0 +1,19 @@
1obj-m := rtl8723be.o
2
3
4rtl8723be-objs := \
5 dm.o \
6 fw.o \
7 hw.o \
8 led.o \
9 phy.o \
10 pwrseq.o \
11 rf.o \
12 sw.o \
13 table.o \
14 trx.o \
15
16
17obj-$(CONFIG_RTL8723BE) += rtl8723be.o
18
19ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
new file mode 100644
index 000000000000..3c30b74e983d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
@@ -0,0 +1,248 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_DEF_H__
27#define __RTL8723BE_DEF_H__
28
29#define HAL_RETRY_LIMIT_INFRA 48
30#define HAL_RETRY_LIMIT_AP_ADHOC 7
31
32#define RESET_DELAY_8185 20
33
34#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
35#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
36
37#define NUM_OF_FIRMWARE_QUEUE 10
38#define NUM_OF_PAGES_IN_FW 0x100
39#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
40#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
41#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
42#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
43#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
44#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
45#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
46#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
47#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
48#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
49
50#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
51#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
52#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
53#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
54#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
55
56#define MAX_LINES_HWCONFIG_TXT 1000
57#define MAX_BYTES_LINE_HWCONFIG_TXT 256
58
59#define SW_THREE_WIRE 0
60#define HW_THREE_WIRE 2
61
62#define BT_DEMO_BOARD 0
63#define BT_QA_BOARD 1
64#define BT_FPGA 2
65
66#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
67#define HAL_PRIME_CHNL_OFFSET_LOWER 1
68#define HAL_PRIME_CHNL_OFFSET_UPPER 2
69
70#define MAX_H2C_QUEUE_NUM 10
71
72#define RX_MPDU_QUEUE 0
73#define RX_CMD_QUEUE 1
74#define RX_MAX_QUEUE 2
75#define AC2QUEUEID(_AC) (_AC)
76
77#define C2H_RX_CMD_HDR_LEN 8
78#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
79 LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
80#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
81 LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
82#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
83 LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
84#define GET_C2H_CMD_CONTINUE(__prxhdr) \
85 LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
86#define GET_C2H_CMD_CONTENT(__prxhdr) \
87 ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
88
89#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
90 LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
91#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
92 LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
93#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
94 LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
95#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
96 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
97#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
98 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
99#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
100 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
101#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
102 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
103#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
104 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
105#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
106 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
107
108#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
109#define CHIP_BONDING_92C_1T2R 0x1
110
111#define CHIP_8723 BIT(0)
112#define CHIP_8723B (BIT(1) | BIT(2))
113#define NORMAL_CHIP BIT(3)
114#define RF_TYPE_1T1R (~(BIT(4) | BIT(5) | BIT(6)))
115#define RF_TYPE_1T2R BIT(4)
116#define RF_TYPE_2T2R BIT(5)
117#define CHIP_VENDOR_UMC BIT(7)
118#define B_CUT_VERSION BIT(12)
119#define C_CUT_VERSION BIT(13)
120#define D_CUT_VERSION ((BIT(12) | BIT(13)))
121#define E_CUT_VERSION BIT(14)
122#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
123
124/* MASK */
125#define IC_TYPE_MASK (BIT(0) | BIT(1) | BIT(2))
126#define CHIP_TYPE_MASK BIT(3)
127#define RF_TYPE_MASK (BIT(4) | BIT(5) | BIT(6))
128#define MANUFACTUER_MASK BIT(7)
129#define ROM_VERSION_MASK (BIT(11) | BIT(10) | BIT(9) | BIT(8))
130#define CUT_VERSION_MASK (BIT(15) | BIT(14) | BIT(13) | BIT(12))
131
132/* Get element */
133#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
134#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
135#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
136#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
137#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
138#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
139
140#define IS_92C_SERIAL(version) ((IS_81XXC(version) && IS_2T2R(version)) ?\
141 true : false)
142#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\
143 true : false)
144#define IS_8723_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8723) ?\
145 true : false)
146#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true)
147#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
148 ? true : false)
149#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
150 ? true : false)
151enum rf_optype {
152 RF_OP_BY_SW_3WIRE = 0,
153 RF_OP_BY_FW,
154 RF_OP_MAX
155};
156
157enum rf_power_state {
158 RF_ON,
159 RF_OFF,
160 RF_SLEEP,
161 RF_SHUT_DOWN,
162};
163
164enum power_save_mode {
165 POWER_SAVE_MODE_ACTIVE,
166 POWER_SAVE_MODE_SAVE,
167};
168
169enum power_polocy_config {
170 POWERCFG_MAX_POWER_SAVINGS,
171 POWERCFG_GLOBAL_POWER_SAVINGS,
172 POWERCFG_LOCAL_POWER_SAVINGS,
173 POWERCFG_LENOVO,
174};
175
176enum interface_select_pci {
177 INTF_SEL1_MINICARD = 0,
178 INTF_SEL0_PCIE = 1,
179 INTF_SEL2_RSV = 2,
180 INTF_SEL3_RSV = 3,
181};
182
183enum rtl_desc_qsel {
184 QSLT_BK = 0x2,
185 QSLT_BE = 0x0,
186 QSLT_VI = 0x5,
187 QSLT_VO = 0x7,
188 QSLT_BEACON = 0x10,
189 QSLT_HIGH = 0x11,
190 QSLT_MGNT = 0x12,
191 QSLT_CMD = 0x13,
192};
193
194enum rtl_desc8723e_rate {
195 DESC92C_RATE1M = 0x00,
196 DESC92C_RATE2M = 0x01,
197 DESC92C_RATE5_5M = 0x02,
198 DESC92C_RATE11M = 0x03,
199
200 DESC92C_RATE6M = 0x04,
201 DESC92C_RATE9M = 0x05,
202 DESC92C_RATE12M = 0x06,
203 DESC92C_RATE18M = 0x07,
204 DESC92C_RATE24M = 0x08,
205 DESC92C_RATE36M = 0x09,
206 DESC92C_RATE48M = 0x0a,
207 DESC92C_RATE54M = 0x0b,
208
209 DESC92C_RATEMCS0 = 0x0c,
210 DESC92C_RATEMCS1 = 0x0d,
211 DESC92C_RATEMCS2 = 0x0e,
212 DESC92C_RATEMCS3 = 0x0f,
213 DESC92C_RATEMCS4 = 0x10,
214 DESC92C_RATEMCS5 = 0x11,
215 DESC92C_RATEMCS6 = 0x12,
216 DESC92C_RATEMCS7 = 0x13,
217 DESC92C_RATEMCS8 = 0x14,
218 DESC92C_RATEMCS9 = 0x15,
219 DESC92C_RATEMCS10 = 0x16,
220 DESC92C_RATEMCS11 = 0x17,
221 DESC92C_RATEMCS12 = 0x18,
222 DESC92C_RATEMCS13 = 0x19,
223 DESC92C_RATEMCS14 = 0x1a,
224 DESC92C_RATEMCS15 = 0x1b,
225 DESC92C_RATEMCS15_SG = 0x1c,
226 DESC92C_RATEMCS32 = 0x20,
227};
228
/* Type of packet delivered in an RX descriptor. */
enum rx_packet_type {
	NORMAL_RX,	/* ordinary received frame */
	TX_REPORT1,	/* TX status report, format 1 */
	TX_REPORT2,	/* TX status report, format 2 */
	HIS_REPORT,	/* interrupt/history report */
};
235
/* CCK PHY status report appended to received CCK frames. */
struct phy_sts_cck_8723e_t {
	u8 adc_pwdb_X[4];	/* per-ADC power reading */
	u8 sq_rpt;		/* signal quality report */
	u8 cck_agc_rpt;		/* CCK AGC report (gain/VGA index) */
};
241
/* Host-to-card (H2C) firmware command descriptor. */
struct h2c_cmd_8723e {
	u8 element_id;		/* command element id */
	u32 cmd_len;		/* payload length in bytes */
	u8 *p_cmdbuffer;	/* pointer to the command payload */
};
247
248#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
new file mode 100644
index 000000000000..13d53a1df789
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -0,0 +1,1325 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../base.h"
28#include "../pci.h"
29#include "reg.h"
30#include "def.h"
31#include "phy.h"
32#include "dm.h"
33#include "../rtl8723com/dm_common.h"
34#include "fw.h"
35#include "../rtl8723com/fw_common.h"
36#include "trx.h"
37#include "../btcoexist/rtl_btc.h"
38
39static const u32 ofdmswing_table[] = {
40 0x0b40002d, /* 0, -15.0dB */
41 0x0c000030, /* 1, -14.5dB */
42 0x0cc00033, /* 2, -14.0dB */
43 0x0d800036, /* 3, -13.5dB */
44 0x0e400039, /* 4, -13.0dB */
45 0x0f00003c, /* 5, -12.5dB */
46 0x10000040, /* 6, -12.0dB */
47 0x11000044, /* 7, -11.5dB */
48 0x12000048, /* 8, -11.0dB */
49 0x1300004c, /* 9, -10.5dB */
50 0x14400051, /* 10, -10.0dB */
51 0x15800056, /* 11, -9.5dB */
52 0x16c0005b, /* 12, -9.0dB */
53 0x18000060, /* 13, -8.5dB */
54 0x19800066, /* 14, -8.0dB */
55 0x1b00006c, /* 15, -7.5dB */
56 0x1c800072, /* 16, -7.0dB */
57 0x1e400079, /* 17, -6.5dB */
58 0x20000080, /* 18, -6.0dB */
59 0x22000088, /* 19, -5.5dB */
60 0x24000090, /* 20, -5.0dB */
61 0x26000098, /* 21, -4.5dB */
62 0x288000a2, /* 22, -4.0dB */
63 0x2ac000ab, /* 23, -3.5dB */
64 0x2d4000b5, /* 24, -3.0dB */
65 0x300000c0, /* 25, -2.5dB */
66 0x32c000cb, /* 26, -2.0dB */
67 0x35c000d7, /* 27, -1.5dB */
68 0x390000e4, /* 28, -1.0dB */
69 0x3c8000f2, /* 29, -0.5dB */
70 0x40000100, /* 30, +0dB */
71 0x43c0010f, /* 31, +0.5dB */
72 0x47c0011f, /* 32, +1.0dB */
73 0x4c000130, /* 33, +1.5dB */
74 0x50800142, /* 34, +2.0dB */
75 0x55400155, /* 35, +2.5dB */
76 0x5a400169, /* 36, +3.0dB */
77 0x5fc0017f, /* 37, +3.5dB */
78 0x65400195, /* 38, +4.0dB */
79 0x6b8001ae, /* 39, +4.5dB */
80 0x71c001c7, /* 40, +5.0dB */
81 0x788001e2, /* 41, +5.5dB */
82 0x7f8001fe /* 42, +6.0dB */
83};
84
85static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
86 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
87 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
88 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
89 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
90 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
91 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
92 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
93 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
94 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
95 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
96 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
97 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
98 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
99 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
100 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
101 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
102 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
103 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
104 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
105 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
106 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
107 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
108 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
109 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
110 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
111 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
112 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
113 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
114 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
115 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
116 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
117 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
118 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
119};
120
121static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
122 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
123 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
124 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
125 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
126 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
127 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
128 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
129 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
130 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
131 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
132 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
133 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
134 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
135 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
136 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
137 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
138 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
139 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
140 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
141 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
142 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
143 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
144 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
145 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
146 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
147 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
148 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
149 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
150 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
151 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
152 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
153 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
154 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
155};
156
/*
 * EDCA parameter register values used for downlink-dominant traffic,
 * indexed by detected peer vendor (PEER_* enum).
 */
static const u32 edca_setting_dl[PEER_MAX] = {
	0xa44f,		/* 0 UNKNOWN */
	0x5ea44f,	/* 1 REALTEK_90 */
	0x5e4322,	/* 2 REALTEK_92SE */
	0x5ea42b,	/* 3 BROAD */
	0xa44f,		/* 4 RAL */
	0xa630,		/* 5 ATH */
	0x5ea630,	/* 6 CISCO */
	0x5ea42b,	/* 7 MARVELL */
};
167
/*
 * EDCA parameter register values used for uplink-dominant traffic,
 * indexed by detected peer vendor (PEER_* enum).
 */
static const u32 edca_setting_ul[PEER_MAX] = {
	0x5e4322,	/* 0 UNKNOWN */
	0xa44f,		/* 1 REALTEK_90 */
	0x5ea44f,	/* 2 REALTEK_92SE */
	0x5ea32b,	/* 3 BROAD */
	0x5ea422,	/* 4 RAL */
	0x5ea322,	/* 5 ATH */
	0x3ea430,	/* 6 CISCO */
	0x5ea44f,	/* 7 MARV */
};
178
179void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
180 u8 *pdirection, u32 *poutwrite_val)
181{
182 struct rtl_priv *rtlpriv = rtl_priv(hw);
183 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
184 u8 pwr_val = 0;
185 u8 ofdm_base = rtlpriv->dm.swing_idx_ofdm_base[RF90_PATH_A];
186 u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
187 u8 cck_base = rtldm->swing_idx_cck_base;
188 u8 cck_val = rtldm->swing_idx_cck;
189
190 if (type == 0) {
191 if (ofdm_val <= ofdm_base) {
192 *pdirection = 1;
193 pwr_val = ofdm_base - ofdm_val;
194 } else {
195 *pdirection = 2;
196 pwr_val = ofdm_val - ofdm_base;
197 }
198 } else if (type == 1) {
199 if (cck_val <= cck_base) {
200 *pdirection = 1;
201 pwr_val = cck_base - cck_val;
202 } else {
203 *pdirection = 2;
204 pwr_val = cck_val - cck_base;
205 }
206 }
207
208 if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
209 pwr_val = TXPWRTRACK_MAX_IDX;
210
211 *poutwrite_val = pwr_val | (pwr_val << 8) |
212 (pwr_val << 16) | (pwr_val << 24);
213}
214
/*
 * Initialize the dynamic initial-gain (DIG) state to its power-on
 * defaults.  The current IGI value is seeded from the baseband AGC
 * register; everything else is set from the DM_DIG_* constants.
 */
static void rtl8723be_dm_diginit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	dm_digtable->dig_enable_flag = true;
	/* Read the live initial-gain index from hardware (low 7 bits). */
	dm_digtable->cur_igvalue = rtl_get_bbreg(hw,
						 ROFDM0_XAAGCCORE1, 0x7f);
	dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
	dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
	dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
	dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
	dm_digtable->rx_gain_max = DM_DIG_MAX;
	dm_digtable->rx_gain_min = DM_DIG_MIN;
	dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
	dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
	dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
	/* CCK CCA thresholds: "previous" starts invalid (0xff). */
	dm_digtable->pre_cck_cca_thres = 0xff;
	dm_digtable->cur_cck_cca_thres = 0x83;
	dm_digtable->forbidden_igi = DM_DIG_MIN;
	dm_digtable->large_fa_hit = 0;
	dm_digtable->recover_cnt = 0;
	dm_digtable->dig_min_0 = DM_DIG_MIN;
	dm_digtable->dig_min_1 = DM_DIG_MIN;
	dm_digtable->media_connect_0 = false;
	dm_digtable->media_connect_1 = false;
	rtlpriv->dm.dm_initialgain_enable = true;
	/* Default IGI used while BT 3.0 coexistence is active. */
	dm_digtable->bt30_cur_igi = 0x32;
}
244
245void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
246{
247 struct rtl_priv *rtlpriv = rtl_priv(hw);
248 struct rate_adaptive *ra = &(rtlpriv->ra);
249
250 ra->ratr_state = DM_RATR_STA_INIT;
251 ra->pre_ratr_state = DM_RATR_STA_INIT;
252
253 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
254 rtlpriv->dm.useramask = true;
255 else
256 rtlpriv->dm.useramask = false;
257
258 ra->high_rssi_thresh_for_ra = 50;
259 ra->low_rssi_thresh_for_ra40m = 20;
260}
261
262static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw)
263{
264 struct rtl_priv *rtlpriv = rtl_priv(hw);
265
266 rtlpriv->dm.txpower_tracking = true;
267 rtlpriv->dm.txpower_track_control = true;
268 rtlpriv->dm.thermalvalue = 0;
269
270 rtlpriv->dm.ofdm_index[0] = 30;
271 rtlpriv->dm.cck_index = 20;
272
273 rtlpriv->dm.swing_idx_cck_base = rtlpriv->dm.cck_index;
274
275 rtlpriv->dm.swing_idx_ofdm_base[0] = rtlpriv->dm.ofdm_index[0];
276 rtlpriv->dm.delta_power_index[RF90_PATH_A] = 0;
277 rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0;
278 rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0;
279
280 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
281 " rtlpriv->dm.txpower_tracking = %d\n",
282 rtlpriv->dm.txpower_tracking);
283}
284
/* Seed the dynamic ATC/CFO-tracking state from efuse and the current
 * baseband register setting.
 */
static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
	/* Mask 0x800 = BIT(11) of ROFDM1_CFOTRACKING: current ATC on/off. */
	rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, 0x800);
	rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
}
293
/* Top-level dynamic-mechanism (DM) initialization for the 8723BE: marks DM
 * as driver-controlled, then runs each sub-feature's init in turn.
 */
void rtl8723be_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl8723be_dm_diginit(hw);
	rtl8723be_dm_init_rate_adaptive_mask(hw);
	rtl8723_dm_init_edca_turbo(hw);
	rtl8723_dm_init_dynamic_bb_powersaving(hw);
	rtl8723_dm_init_dynamic_txpower(hw);
	rtl8723be_dm_init_txpower_tracking(hw);
	rtl8723be_dm_init_dynamic_atc_switch(hw);
}
307
308static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
309{
310 struct rtl_priv *rtlpriv = rtl_priv(hw);
311 struct dig_t *rtl_dm_dig = &(rtlpriv->dm_digtable);
312 struct rtl_mac *mac = rtl_mac(rtlpriv);
313
314 /* Determine the minimum RSSI */
315 if ((mac->link_state < MAC80211_LINKED) &&
316 (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
317 rtl_dm_dig->min_undec_pwdb_for_dm = 0;
318 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
319 "Not connected to any\n");
320 }
321 if (mac->link_state >= MAC80211_LINKED) {
322 if (mac->opmode == NL80211_IFTYPE_AP ||
323 mac->opmode == NL80211_IFTYPE_ADHOC) {
324 rtl_dm_dig->min_undec_pwdb_for_dm =
325 rtlpriv->dm.entry_min_undec_sm_pwdb;
326 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
327 "AP Client PWDB = 0x%lx\n",
328 rtlpriv->dm.entry_min_undec_sm_pwdb);
329 } else {
330 rtl_dm_dig->min_undec_pwdb_for_dm =
331 rtlpriv->dm.undec_sm_pwdb;
332 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
333 "STA Default Port PWDB = 0x%x\n",
334 rtl_dm_dig->min_undec_pwdb_for_dm);
335 }
336 } else {
337 rtl_dm_dig->min_undec_pwdb_for_dm =
338 rtlpriv->dm.entry_min_undec_sm_pwdb;
339 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
340 "AP Ext Port or disconnet PWDB = 0x%x\n",
341 rtl_dm_dig->min_undec_pwdb_for_dm);
342 }
343 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
344 rtl_dm_dig->min_undec_pwdb_for_dm);
345}
346
/* Walk all station entries (AP/ad-hoc/mesh) to find the max/min undecorated
 * PWDB, report our own RSSI to firmware, and refresh the minimum-RSSI value
 * consumed by the DIG logic.
 */
static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *drv_priv;
	u8 h2c_parameter[3] = { 0 };
	/* min starts at 0xff (above any real PWDB), max at 0. */
	long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;

	/* AP & ADHOC & MESH */
	spin_lock_bh(&rtlpriv->locks.entry_list_lock);
	list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
		if (drv_priv->rssi_stat.undec_sm_pwdb <
		    tmp_entry_min_pwdb)
			tmp_entry_min_pwdb =
			    drv_priv->rssi_stat.undec_sm_pwdb;
		if (drv_priv->rssi_stat.undec_sm_pwdb >
		    tmp_entry_max_pwdb)
			tmp_entry_max_pwdb =
			    drv_priv->rssi_stat.undec_sm_pwdb;
	}
	spin_unlock_bh(&rtlpriv->locks.entry_list_lock);

	/* If associated entry is found */
	if (tmp_entry_max_pwdb != 0) {
		rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "EntryMaxPWDB = 0x%lx(%ld)\n",
			 tmp_entry_max_pwdb, tmp_entry_max_pwdb);
	} else {
		rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
	}
	/* If associated entry is found */
	if (tmp_entry_min_pwdb != 0xff) {
		rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "EntryMinPWDB = 0x%lx(%ld)\n",
			 tmp_entry_min_pwdb, tmp_entry_min_pwdb);
	} else {
		rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
	}
	/* Indicate Rx signal strength to FW. */
	if (rtlpriv->dm.useramask) {
		/* Byte 2 carries our own PWDB; bytes 0/1 look like a fixed
		 * macid/flag pair (0, 0x20) — presumably the H2C RSSI-report
		 * layout expected by firmware; confirm against the fw spec.
		 */
		h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
		h2c_parameter[1] = 0x20;
		h2c_parameter[0] = 0;
		rtl8723be_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
	} else {
		/* Firmware not in control: write RSSI to register 0x4fe. */
		rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
	}
	rtl8723be_dm_find_minimum_rssi(hw);
	rtlpriv->dm_digtable.rssi_val_min =
		rtlpriv->dm_digtable.min_undec_pwdb_for_dm;
}
399
400void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
401{
402 struct rtl_priv *rtlpriv = rtl_priv(hw);
403
404 if (rtlpriv->dm_digtable.cur_igvalue != current_igi) {
405 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi);
406 if (rtlpriv->phy.rf_type != RF_1T1R)
407 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi);
408 }
409 rtlpriv->dm_digtable.pre_igvalue = rtlpriv->dm_digtable.cur_igvalue;
410 rtlpriv->dm_digtable.cur_igvalue = current_igi;
411}
412
/* Dynamic Initial Gain (DIG) update.  Periodically adjusts the RX initial
 * gain index (IGI) based on link state, minimum RSSI, and the false-alarm
 * counters, clamping it into a [rx_gain_min, rx_gain_max] window that is
 * itself adapted over time.
 */
static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct dig_t *dm_digtable = &(rtlpriv->dm_digtable);
	u8 dig_dynamic_min, dig_maxofmin;
	bool firstconnect, firstdisconnect;
	u8 dm_dig_max, dm_dig_min;
	u8 current_igi = dm_digtable->cur_igvalue;
	u8 offset;

	/* AP, BT */
	if (mac->act_scanning)
		return;

	/* Detect link-state transitions since the previous run. */
	dig_dynamic_min = dm_digtable->dig_min_0;
	firstconnect = (mac->link_state >= MAC80211_LINKED) &&
		       !dm_digtable->media_connect_0;
	firstdisconnect = (mac->link_state < MAC80211_LINKED) &&
			  dm_digtable->media_connect_0;

	dm_dig_max = 0x5a;
	dm_dig_min = DM_DIG_MIN;
	dig_maxofmin = DM_DIG_MAX_AP;

	if (mac->link_state >= MAC80211_LINKED) {
		/* Upper bound: min RSSI + 10, clamped to [dm_dig_min,
		 * dm_dig_max].
		 */
		if ((dm_digtable->rssi_val_min + 10) > dm_dig_max)
			dm_digtable->rx_gain_max = dm_dig_max;
		else if ((dm_digtable->rssi_val_min + 10) < dm_dig_min)
			dm_digtable->rx_gain_max = dm_dig_min;
		else
			dm_digtable->rx_gain_max =
				dm_digtable->rssi_val_min + 10;

		if (rtlpriv->dm.one_entry_only) {
			/* Lower bound tracks RSSI - 12 when only one peer
			 * exists, clamped to [dm_dig_min, dig_maxofmin].
			 */
			offset = 12;
			if (dm_digtable->rssi_val_min - offset < dm_dig_min)
				dig_dynamic_min = dm_dig_min;
			else if (dm_digtable->rssi_val_min - offset >
				 dig_maxofmin)
				dig_dynamic_min = dig_maxofmin;
			else
				dig_dynamic_min =
					dm_digtable->rssi_val_min - offset;
		} else {
			dig_dynamic_min = dm_dig_min;
		}
	} else {
		dm_digtable->rx_gain_max = dm_dig_max;
		dig_dynamic_min = dm_dig_min;
		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
	}

	/* Abnormally high false-alarm count: remember the offending IGI as
	 * "forbidden" and raise the gain floor above it for a while.
	 */
	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
		if (dm_digtable->large_fa_hit != 3)
			dm_digtable->large_fa_hit++;
		if (dm_digtable->forbidden_igi < current_igi) {
			dm_digtable->forbidden_igi = current_igi;
			dm_digtable->large_fa_hit = 1;
		}

		if (dm_digtable->large_fa_hit >= 3) {
			if ((dm_digtable->forbidden_igi + 1) >
			    dm_digtable->rx_gain_max)
				dm_digtable->rx_gain_min =
						dm_digtable->rx_gain_max;
			else
				dm_digtable->rx_gain_min =
						dm_digtable->forbidden_igi + 1;
			/* Hold the raised floor for 3600 invocations. */
			dm_digtable->recover_cnt = 3600;
		}
	} else {
		/* False alarms back to normal: slowly relax the forbidden
		 * IGI once the recovery countdown expires.
		 */
		if (dm_digtable->recover_cnt != 0) {
			dm_digtable->recover_cnt--;
		} else {
			if (dm_digtable->large_fa_hit < 3) {
				if ((dm_digtable->forbidden_igi - 1) <
				    dig_dynamic_min) {
					dm_digtable->forbidden_igi =
							dig_dynamic_min;
					dm_digtable->rx_gain_min =
							dig_dynamic_min;
				} else {
					dm_digtable->forbidden_igi--;
					dm_digtable->rx_gain_min =
						dm_digtable->forbidden_igi + 1;
				}
			} else {
				dm_digtable->large_fa_hit = 0;
			}
		}
	}
	if (dm_digtable->rx_gain_min > dm_digtable->rx_gain_max)
		dm_digtable->rx_gain_min = dm_digtable->rx_gain_max;

	/* Choose the new IGI: jump to (clamped) RSSI on first connect,
	 * otherwise step up/down based on false-alarm thresholds.
	 */
	if (mac->link_state >= MAC80211_LINKED) {
		if (firstconnect) {
			if (dm_digtable->rssi_val_min <= dig_maxofmin)
				current_igi = dm_digtable->rssi_val_min;
			else
				current_igi = dig_maxofmin;

			dm_digtable->large_fa_hit = 0;
		} else {
			if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
				current_igi += 4;
			else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
				current_igi += 2;
			else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
				current_igi -= 2;
		}
	} else {
		if (firstdisconnect) {
			current_igi = dm_digtable->rx_gain_min;
		} else {
			/* Unlinked: use fixed false-alarm thresholds. */
			if (rtlpriv->falsealm_cnt.cnt_all > 10000)
				current_igi += 4;
			else if (rtlpriv->falsealm_cnt.cnt_all > 8000)
				current_igi += 2;
			else if (rtlpriv->falsealm_cnt.cnt_all < 500)
				current_igi -= 2;
		}
	}

	/* Clamp into the adapted window and commit to hardware. */
	if (current_igi > dm_digtable->rx_gain_max)
		current_igi = dm_digtable->rx_gain_max;
	else if (current_igi < dm_digtable->rx_gain_min)
		current_igi = dm_digtable->rx_gain_min;

	rtl8723be_dm_write_dig(hw, current_igi);
	dm_digtable->media_connect_0 =
		((mac->link_state >= MAC80211_LINKED) ? true : false);
	dm_digtable->dig_min_0 = dig_dynamic_min;
}
547
/* Read the hardware false-alarm counters (OFDM and CCK), accumulate the
 * totals consumed by the DIG logic, then reset the counters for the next
 * measurement interval.  The hold/read/reset register sequence is
 * order-critical — do not reorder the accesses.
 */
static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
	u32 ret_value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);

	/* Freeze the OFDM counters so a consistent snapshot can be read. */
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1);
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1);

	/* Each TYPE register packs two 16-bit counters. */
	ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE1_11N, MASKDWORD);
	falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff;
	falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16;

	ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE2_11N, MASKDWORD);
	falsealm_cnt->cnt_ofdm_cca = ret_value & 0xffff;
	falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16;

	ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE3_11N, MASKDWORD);
	falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff;
	falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16;

	ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE4_11N, MASKDWORD);
	falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff;

	falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
				      falsealm_cnt->cnt_rate_illegal +
				      falsealm_cnt->cnt_crc8_fail +
				      falsealm_cnt->cnt_mcs_fail +
				      falsealm_cnt->cnt_fast_fsync_fail +
				      falsealm_cnt->cnt_sb_search_fail;

	/* Freeze the CCK counters before reading them. */
	rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(12), 1);
	rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(14), 1);

	/* CCK FA count is split: low byte here, high byte in the MSB reg. */
	ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_RST_11N, MASKBYTE0);
	falsealm_cnt->cnt_cck_fail = ret_value;

	ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_MSB_11N, MASKBYTE3);
	falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;

	/* CCK CCA count: hardware stores the two bytes swapped. */
	ret_value = rtl_get_bbreg(hw, DM_REG_CCK_CCA_CNT_11N, MASKDWORD);
	falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) |
				    ((ret_value & 0xff00) >> 8);

	falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
				falsealm_cnt->cnt_sb_search_fail +
				falsealm_cnt->cnt_parity_fail +
				falsealm_cnt->cnt_rate_illegal +
				falsealm_cnt->cnt_crc8_fail +
				falsealm_cnt->cnt_mcs_fail +
				falsealm_cnt->cnt_cck_fail;

	falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca +
				    falsealm_cnt->cnt_cck_cca;

	/* Reset the OFDM counters (toggle reset bits high then low). */
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 1);
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 0);
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 1);
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 0);

	/* Release the hold so counting resumes. */
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0);
	rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 0);

	/* Reset and re-arm the CCK counters. */
	rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0);
	rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2);

	rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
	rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
		 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
		 falsealm_cnt->cnt_parity_fail,
		 falsealm_cnt->cnt_rate_illegal,
		 falsealm_cnt->cnt_crc8_fail,
		 falsealm_cnt->cnt_mcs_fail);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x,"
		 " cnt_all = %x\n",
		 falsealm_cnt->cnt_ofdm_fail,
		 falsealm_cnt->cnt_cck_fail,
		 falsealm_cnt->cnt_all);
}
632
/* Intentionally a no-op: the 8723BE has no ODM_BB_DYNAMIC_TXPWR support,
 * so dynamic TX power adjustment does nothing on this chip.
 */
static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
}
638
/* Program the TX IQ-imbalance compensation matrix for one RF path, scaled
 * by the OFDM swing-table entry at @ofdm_index.  With valid IQK results
 * (iqk_result_x != 0) the matrix elements are rescaled by the swing gain;
 * otherwise the raw swing-table value is written and the correction terms
 * are cleared.  Only RF90_PATH_A is handled; other paths fall through.
 */
static void rtl8723be_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index,
				     u8 rfpath, long iqk_result_x,
				     long iqk_result_y)
{
	long ele_a = 0, ele_d, ele_c = 0, value32;

	/* Clamp to the last valid OFDM swing-table entry. */
	if (ofdm_index >= 43)
		ofdm_index = 43 - 1;

	/* ele_d: swing gain, top 10 bits of the swing-table word. */
	ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000) >> 22;

	if (iqk_result_x != 0) {
		/* Sign-extend the 10-bit IQK results before scaling. */
		if ((iqk_result_x & 0x00000200) != 0)
			iqk_result_x = iqk_result_x | 0xFFFFFC00;
		ele_a = ((iqk_result_x * ele_d) >> 8) & 0x000003FF;

		if ((iqk_result_y & 0x00000200) != 0)
			iqk_result_y = iqk_result_y | 0xFFFFFC00;
		ele_c = ((iqk_result_y * ele_d) >> 8) & 0x000003FF;

		switch (rfpath) {
		case RF90_PATH_A:
			/* Pack D (10 bits), low 6 bits of C, and A into the
			 * imbalance register; the top 4 bits of C go into
			 * ROFDM0_XCTXAFE.
			 */
			value32 = (ele_d << 22) |
				  ((ele_c & 0x3F) << 16) | ele_a;
			rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
				      value32);
			value32 = (ele_c & 0x000003C0) >> 6;
			rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
			/* Bit 24: rounding bit from the scaled X result. */
			value32 = ((iqk_result_x * ele_d) >> 7) & 0x01;
			rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
				      value32);
			break;
		default:
			break;
		}
	} else {
		/* No IQK result: write the unscaled swing value, clear the
		 * correction fields.
		 */
		switch (rfpath) {
		case RF90_PATH_A:
			rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
				      ofdmswing_table[ofdm_index]);
			rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
			rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00);
			break;
		default:
			break;
		}
	}
}
687
/* Apply a TX power tracking adjustment using the requested method:
 * TXAGC re-runs the channel TX power setup; BBSWING rewrites the CCK
 * filter coefficients (regs 0xa22..0xa29) from the current swing index
 * and reprograms the OFDM IQK matrix for @rfpath.
 */
static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw,
					enum pwr_track_control_method method,
					u8 rfpath, u8 idx)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	u8 swing_idx_ofdm_limit = 36;

	if (method == TXAGC) {
		rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
	} else if (method == BBSWING) {
		/* Clamp the CCK swing index into the table. */
		if (rtldm->swing_idx_cck >= CCK_TABLE_SIZE)
			rtldm->swing_idx_cck = CCK_TABLE_SIZE - 1;

		/* Channel 14 uses a different CCK filter table. */
		if (!rtldm->cck_inch14) {
			rtl_write_byte(rtlpriv, 0xa22,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][0]);
			rtl_write_byte(rtlpriv, 0xa23,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][1]);
			rtl_write_byte(rtlpriv, 0xa24,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][2]);
			rtl_write_byte(rtlpriv, 0xa25,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][3]);
			rtl_write_byte(rtlpriv, 0xa26,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][4]);
			rtl_write_byte(rtlpriv, 0xa27,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][5]);
			rtl_write_byte(rtlpriv, 0xa28,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][6]);
			rtl_write_byte(rtlpriv, 0xa29,
			    cckswing_table_ch1ch13[rtldm->swing_idx_cck][7]);
		} else {
			rtl_write_byte(rtlpriv, 0xa22,
			    cckswing_table_ch14[rtldm->swing_idx_cck][0]);
			rtl_write_byte(rtlpriv, 0xa23,
			    cckswing_table_ch14[rtldm->swing_idx_cck][1]);
			rtl_write_byte(rtlpriv, 0xa24,
			    cckswing_table_ch14[rtldm->swing_idx_cck][2]);
			rtl_write_byte(rtlpriv, 0xa25,
			    cckswing_table_ch14[rtldm->swing_idx_cck][3]);
			rtl_write_byte(rtlpriv, 0xa26,
			    cckswing_table_ch14[rtldm->swing_idx_cck][4]);
			rtl_write_byte(rtlpriv, 0xa27,
			    cckswing_table_ch14[rtldm->swing_idx_cck][5]);
			rtl_write_byte(rtlpriv, 0xa28,
			    cckswing_table_ch14[rtldm->swing_idx_cck][6]);
			rtl_write_byte(rtlpriv, 0xa29,
			    cckswing_table_ch14[rtldm->swing_idx_cck][7]);
		}

		/* NOTE(review): swing_idx_ofdm_limit is clamped below but
		 * never actually used — the unclamped swing_idx_ofdm[rfpath]
		 * is what gets passed to rtl8723be_set_iqk_matrix().  Confirm
		 * whether the limit was meant to be applied.
		 */
		if (rfpath == RF90_PATH_A) {
			if (rtldm->swing_idx_ofdm[RF90_PATH_A] <
			    swing_idx_ofdm_limit)
				swing_idx_ofdm_limit =
					rtldm->swing_idx_ofdm[RF90_PATH_A];

			rtl8723be_set_iqk_matrix(hw,
				rtldm->swing_idx_ofdm[rfpath], rfpath,
				rtlphy->iqk_matrix[idx].value[0][0],
				rtlphy->iqk_matrix[idx].value[0][1]);
		} else if (rfpath == RF90_PATH_B) {
			if (rtldm->swing_idx_ofdm[RF90_PATH_B] <
			    swing_idx_ofdm_limit)
				swing_idx_ofdm_limit =
					rtldm->swing_idx_ofdm[RF90_PATH_B];

			rtl8723be_set_iqk_matrix(hw,
				rtldm->swing_idx_ofdm[rfpath], rfpath,
				rtlphy->iqk_matrix[idx].value[0][4],
				rtlphy->iqk_matrix[idx].value[0][5]);
		}
	} else {
		return;
	}
}
764
765static void txpwr_track_cb_therm(struct ieee80211_hw *hw)
766{
767 struct rtl_priv *rtlpriv = rtl_priv(hw);
768 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
769 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
770 u8 thermalvalue = 0, delta, delta_lck, delta_iqk;
771 u8 thermalvalue_avg_count = 0;
772 u32 thermalvalue_avg = 0;
773 int i = 0;
774
775 u8 ofdm_min_index = 6;
776 u8 index = 0;
777
778 char delta_swing_table_idx_tup_a[] = {
779 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
780 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
781 10, 11, 11, 12, 12, 13, 14, 15};
782 char delta_swing_table_idx_tdown_a[] = {
783 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
784 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9,
785 9, 10, 10, 11, 12, 13, 14, 15};
786
787 /*Initilization ( 7 steps in total)*/
788 rtlpriv->dm.txpower_trackinginit = true;
789 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
790 "rtl8723be_dm_txpower_tracking"
791 "_callback_thermalmeter\n");
792
793 thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00);
794 if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 ||
795 rtlefuse->eeprom_thermalmeter == 0xFF)
796 return;
797 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
798 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
799 "eeprom_thermalmeter 0x%x\n",
800 thermalvalue, rtldm->thermalvalue,
801 rtlefuse->eeprom_thermalmeter);
802 /*3 Initialize ThermalValues of RFCalibrateInfo*/
803 if (!rtldm->thermalvalue) {
804 rtlpriv->dm.thermalvalue_lck = thermalvalue;
805 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
806 }
807
808 /*4 Calculate average thermal meter*/
809 rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue;
810 rtldm->thermalvalue_avg_index++;
811 if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8723BE)
812 rtldm->thermalvalue_avg_index = 0;
813
814 for (i = 0; i < AVG_THERMAL_NUM_8723BE; i++) {
815 if (rtldm->thermalvalue_avg[i]) {
816 thermalvalue_avg += rtldm->thermalvalue_avg[i];
817 thermalvalue_avg_count++;
818 }
819 }
820
821 if (thermalvalue_avg_count)
822 thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
823
824 /* 5 Calculate delta, delta_LCK, delta_IQK.*/
825 delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
826 (thermalvalue - rtlpriv->dm.thermalvalue) :
827 (rtlpriv->dm.thermalvalue - thermalvalue);
828 delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
829 (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
830 (rtlpriv->dm.thermalvalue_lck - thermalvalue);
831 delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
832 (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
833 (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
834
835 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
836 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
837 "eeprom_thermalmeter 0x%x delta 0x%x "
838 "delta_lck 0x%x delta_iqk 0x%x\n",
839 thermalvalue, rtlpriv->dm.thermalvalue,
840 rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
841 /* 6 If necessary, do LCK.*/
842 if (delta_lck >= IQK_THRESHOLD) {
843 rtlpriv->dm.thermalvalue_lck = thermalvalue;
844 rtl8723be_phy_lc_calibrate(hw);
845 }
846
847 /* 7 If necessary, move the index of
848 * swing table to adjust Tx power.
849 */
850 if (delta > 0 && rtlpriv->dm.txpower_track_control) {
851 delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
852 (thermalvalue - rtlefuse->eeprom_thermalmeter) :
853 (rtlefuse->eeprom_thermalmeter - thermalvalue);
854
855 if (delta >= TXSCALE_TABLE_SIZE)
856 delta = TXSCALE_TABLE_SIZE - 1;
857 /* 7.1 Get the final CCK_index and
858 * OFDM_index for each swing table.
859 */
860 if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
861 rtldm->delta_power_index_last[RF90_PATH_A] =
862 rtldm->delta_power_index[RF90_PATH_A];
863 rtldm->delta_power_index[RF90_PATH_A] =
864 delta_swing_table_idx_tup_a[delta];
865 } else {
866 rtldm->delta_power_index_last[RF90_PATH_A] =
867 rtldm->delta_power_index[RF90_PATH_A];
868 rtldm->delta_power_index[RF90_PATH_A] =
869 -1 * delta_swing_table_idx_tdown_a[delta];
870 }
871
872 /* 7.2 Handle boundary conditions of index.*/
873 if (rtldm->delta_power_index[RF90_PATH_A] ==
874 rtldm->delta_power_index_last[RF90_PATH_A])
875 rtldm->power_index_offset[RF90_PATH_A] = 0;
876 else
877 rtldm->power_index_offset[RF90_PATH_A] =
878 rtldm->delta_power_index[RF90_PATH_A] -
879 rtldm->delta_power_index_last[RF90_PATH_A];
880
881 rtldm->ofdm_index[0] =
882 rtldm->swing_idx_ofdm_base[RF90_PATH_A] +
883 rtldm->power_index_offset[RF90_PATH_A];
884 rtldm->cck_index = rtldm->swing_idx_cck_base +
885 rtldm->power_index_offset[RF90_PATH_A];
886
887 rtldm->swing_idx_cck = rtldm->cck_index;
888 rtldm->swing_idx_ofdm[0] = rtldm->ofdm_index[0];
889
890 if (rtldm->ofdm_index[0] > OFDM_TABLE_SIZE - 1)
891 rtldm->ofdm_index[0] = OFDM_TABLE_SIZE - 1;
892 else if (rtldm->ofdm_index[0] < ofdm_min_index)
893 rtldm->ofdm_index[0] = ofdm_min_index;
894
895 if (rtldm->cck_index > CCK_TABLE_SIZE - 1)
896 rtldm->cck_index = CCK_TABLE_SIZE - 1;
897 else if (rtldm->cck_index < 0)
898 rtldm->cck_index = 0;
899 } else {
900 rtldm->power_index_offset[RF90_PATH_A] = 0;
901 }
902
903 if ((rtldm->power_index_offset[RF90_PATH_A] != 0) &&
904 (rtldm->txpower_track_control)) {
905 rtldm->done_txpower = true;
906 if (thermalvalue > rtlefuse->eeprom_thermalmeter)
907 rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
908 index);
909 else
910 rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
911 index);
912
913 rtldm->swing_idx_cck_base = rtldm->swing_idx_cck;
914 rtldm->swing_idx_ofdm_base[RF90_PATH_A] =
915 rtldm->swing_idx_ofdm[0];
916 rtldm->thermalvalue = thermalvalue;
917 }
918
919 if (delta_iqk >= IQK_THRESHOLD) {
920 rtldm->thermalvalue_iqk = thermalvalue;
921 rtl8723be_phy_iq_calibrate(hw, false);
922 }
923
924 rtldm->txpowercount = 0;
925 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
926}
927
928void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
929{
930 struct rtl_priv *rtlpriv = rtl_priv(hw);
931 static u8 tm_trigger;
932
933 if (!rtlpriv->dm.txpower_tracking)
934 return;
935
936 if (!tm_trigger) {
937 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
938 0x03);
939 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
940 "Trigger 8723be Thermal Meter!!\n");
941 tm_trigger = 1;
942 return;
943 } else {
944 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
945 "Schedule TxPowerTracking !!\n");
946 txpwr_track_cb_therm(hw);
947 tm_trigger = 0;
948 }
949}
950
/* Re-evaluate the rate-adaptive RSSI level (HIGH/MIDDLE/LOW) from the
 * current signal strength and, on a level change, push a new rate mask to
 * the firmware for the associated AP's station entry.
 */
static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rate_adaptive *ra = &(rtlpriv->ra);
	struct ieee80211_sta *sta = NULL;
	/* NOTE(review): this reads low2high_rssi_thresh_for_ra40m while the
	 * init function sets low_rssi_thresh_for_ra40m — verify the intended
	 * field; the two may never be in sync.
	 */
	u32 low_rssithresh_for_ra = ra->low2high_rssi_thresh_for_ra40m;
	u32 high_rssithresh_for_ra = ra->high_rssi_thresh_for_ra;
	u8 go_up_gap = 5;

	if (is_hal_stop(rtlhal)) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "driver is going to unload\n");
		return;
	}

	if (!rtlpriv->dm.useramask) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "driver does not control rate adaptive mask\n");
		return;
	}

	if (mac->link_state == MAC80211_LINKED &&
	    mac->opmode == NL80211_IFTYPE_STATION) {
		/* Hysteresis: when currently in a lower level, raise the
		 * thresholds so we don't flap back up on a marginal gain.
		 */
		switch (ra->pre_ratr_state) {
		case DM_RATR_STA_MIDDLE:
			high_rssithresh_for_ra += go_up_gap;
			break;
		case DM_RATR_STA_LOW:
			high_rssithresh_for_ra += go_up_gap;
			low_rssithresh_for_ra += go_up_gap;
			break;
		default:
			break;
		}

		if (rtlpriv->dm.undec_sm_pwdb >
		    (long)high_rssithresh_for_ra)
			ra->ratr_state = DM_RATR_STA_HIGH;
		else if (rtlpriv->dm.undec_sm_pwdb >
			 (long)low_rssithresh_for_ra)
			ra->ratr_state = DM_RATR_STA_MIDDLE;
		else
			ra->ratr_state = DM_RATR_STA_LOW;

		if (ra->pre_ratr_state != ra->ratr_state) {
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI = %ld\n",
				 rtlpriv->dm.undec_sm_pwdb);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI_LEVEL = %d\n", ra->ratr_state);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "PreState = %d, CurState = %d\n",
				 ra->pre_ratr_state, ra->ratr_state);

			/* Station lookup is RCU-protected; hold the read
			 * lock across the rate-table update.
			 */
			rcu_read_lock();
			sta = rtl_find_sta(hw, mac->bssid);
			if (sta)
				rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
							ra->ratr_state);
			rcu_read_unlock();

			ra->pre_ratr_state = ra->ratr_state;
		}
	}
}
1018
1019static bool rtl8723be_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
1020{
1021 struct rtl_priv *rtlpriv = rtl_priv(hw);
1022
1023 if (rtlpriv->cfg->ops->get_btc_status()) {
1024 if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
1025 return true;
1026 }
1027 if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
1028 return true;
1029
1030 return false;
1031}
1032
/* Periodic EDCA-turbo evaluation: measure the TX/RX byte deltas since the
 * previous run, decide whether traffic is download- or upload-dominant,
 * and program the matching aggressive EDCA parameter word — or restore the
 * normal AC_BE parameters when turbo must be off.
 */
static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	/* Function-local statics keep the previous byte counters between
	 * invocations so per-interval deltas can be computed.
	 */
	static u64 last_txok_cnt;
	static u64 last_rxok_cnt;
	u64 cur_txok_cnt = 0;
	u64 cur_rxok_cnt = 0;
	u32 edca_be_ul = 0x6ea42b;
	u32 edca_be_dl = 0x6ea42b;/*not sure*/
	u32 edca_be = 0x5ea42b;
	u32 iot_peer = 0;
	bool is_cur_rdlstate;
	bool last_is_cur_rdlstate = false;
	bool bias_on_rx = false;
	bool edca_turbo_on = false;

	/* NOTE(review): last_is_cur_rdlstate is captured here but never
	 * read afterwards.
	 */
	last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate;

	cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
	cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;

	iot_peer = rtlpriv->mac80211.vendor;
	bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ?
		     true : false;
	edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) &&
			 (!rtlpriv->dm.disable_framebursting)) ?
			true : false;

	/* Only Cisco peers on 11n/2.4G get vendor-specific EDCA words. */
	if ((iot_peer == PEER_CISCO) &&
	    (mac->mode == WIRELESS_MODE_N_24G)) {
		edca_be_dl = edca_setting_dl[iot_peer];
		edca_be_ul = edca_setting_ul[iot_peer];
	}
	if (rtl8723be_dm_is_edca_turbo_disable(hw))
		goto exit;

	if (edca_turbo_on) {
		/* Download state when RX dominates TX by 4x (biased peers
		 * invert the comparison).
		 */
		if (bias_on_rx)
			is_cur_rdlstate = (cur_txok_cnt > cur_rxok_cnt * 4) ?
					  false : true;
		else
			is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ?
					  true : false;

		edca_be = (is_cur_rdlstate) ? edca_be_dl : edca_be_ul;
		rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be);
		rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate;
		rtlpriv->dm.current_turbo_edca = true;
	} else {
		/* Turbo off: restore the standard AC_BE parameters once. */
		if (rtlpriv->dm.current_turbo_edca) {
			u8 tmp = AC0_BE;
			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
						      &tmp);
		}
		rtlpriv->dm.current_turbo_edca = false;
	}

exit:
	rtlpriv->dm.is_any_nonbepkts = false;
	last_txok_cnt = rtlpriv->stats.txbytesunicast;
	last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
1096
1097static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
1098{
1099 struct rtl_priv *rtlpriv = rtl_priv(hw);
1100 u8 cur_cck_cca_thresh;
1101
1102 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
1103 if (rtlpriv->dm_digtable.rssi_val_min > 25) {
1104 cur_cck_cca_thresh = 0xcd;
1105 } else if ((rtlpriv->dm_digtable.rssi_val_min <= 25) &&
1106 (rtlpriv->dm_digtable.rssi_val_min > 10)) {
1107 cur_cck_cca_thresh = 0x83;
1108 } else {
1109 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
1110 cur_cck_cca_thresh = 0x83;
1111 else
1112 cur_cck_cca_thresh = 0x40;
1113 }
1114 } else {
1115 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
1116 cur_cck_cca_thresh = 0x83;
1117 else
1118 cur_cck_cca_thresh = 0x40;
1119 }
1120
1121 if (rtlpriv->dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh)
1122 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
1123
1124 rtlpriv->dm_digtable.pre_cck_cca_thres = rtlpriv->dm_digtable.cur_cck_cca_thres;
1125 rtlpriv->dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
1126 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
1127 "CCK cca thresh hold =%x\n",
1128 rtlpriv->dm_digtable.cur_cck_cca_thres);
1129}
1130
/* Dynamic EDCCA (energy-detect CCA) control: when both RX chains report a
 * high AGC gain (> 0x28) enable the tight EDCCA thresholds; when both drop
 * below 0x25 restore the permissive thresholds.  Skipped while firmware
 * power-save is active.
 */
static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 reg_c50, reg_c58;
	bool fw_current_in_ps_mode = false;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *)(&fw_current_in_ps_mode));
	if (fw_current_in_ps_mode)
		return;

	/* Current AGC gain on paths A and B. */
	reg_c50 = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
	reg_c58 = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);

	/* NOTE(review): pre_edcca_enable gates both branches but is never
	 * updated in this function — confirm it is maintained elsewhere,
	 * otherwise one branch can never fire.
	 */
	if (reg_c50 > 0x28 && reg_c58 > 0x28) {
		if (!rtlpriv->rtlhal.pre_edcca_enable) {
			rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x03);
			rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x00);
		}
	} else if (reg_c50 < 0x25 && reg_c58 < 0x25) {
		if (rtlpriv->rtlhal.pre_edcca_enable) {
			rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x7f);
			rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x7f);
		}
	}
}
1157
/* Dynamically toggle ATC (bit 11 of ROFDM1_CFOTRACKING) and trim the
 * crystal capacitor to compensate the carrier frequency offset (CFO)
 * reported by the baseband.
 *
 * Not associated: force ATC on and restore the default crystal cap from
 * efuse. Associated: average the per-path CFO, nudge the crystal cap
 * toward zero offset, and turn ATC off once the residual CFO is small.
 */
static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	u8 crystal_cap;
	u32 packet_count;
	int cfo_khz_a, cfo_khz_b, cfo_ave = 0, adjust_xtal = 0;
	int cfo_ave_diff;

	if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
		/* Unlinked: make sure ATC is enabled. */
		if (rtldm->atc_status == ATC_STATUS_OFF) {
			rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
				      ATC_STATUS_ON);
			rtldm->atc_status = ATC_STATUS_ON;
		}
		/* While BT is active, do not touch CFO tracking. */
		if (rtlpriv->cfg->ops->get_btc_status()) {
			if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
					 "odm_DynamicATCSwitch(): Disable"
					 " CFO tracking for BT!!\n");
				return;
			}
		}

		/* Restore the efuse default crystal cap if it was adjusted
		 * during a previous association.
		 */
		if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) {
			rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
			crystal_cap = rtldm->crystal_cap & 0x3f;
			/* Cap value is duplicated into both 6-bit fields of
			 * the 0xFFF000 mask.
			 */
			rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
				      (crystal_cap | (crystal_cap << 6)));
		}
	} else {
		/* Scale the raw CFO tail readings; the 3125/1280 factor is
		 * inherited from the vendor driver (presumably raw -> kHz;
		 * units unverified).
		 */
		cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
		cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
		packet_count = rtldm->packet_count;

		/* No new packets since the last run: CFO stats are stale. */
		if (packet_count == rtldm->packet_count_pre)
			return;

		rtldm->packet_count_pre = packet_count;

		/* Average over the RF paths actually in use. */
		if (rtlpriv->phy.rf_type == RF_1T1R)
			cfo_ave = cfo_khz_a;
		else
			cfo_ave = (int)(cfo_khz_a + cfo_khz_b) >> 1;

		cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ?
			       (rtldm->cfo_ave_pre - cfo_ave) :
			       (cfo_ave - rtldm->cfo_ave_pre);

		/* Ignore a single large CFO jump; only react when it
		 * persists for two consecutive samples.
		 */
		if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
			rtldm->large_cfo_hit = 1;
			return;
		} else {
			rtldm->large_cfo_hit = 0;
		}

		rtldm->cfo_ave_pre = cfo_ave;

		/* Once CFO settles inside the threshold, freeze tracking and
		 * widen the threshold by 10 to add hysteresis.
		 */
		if (cfo_ave >= -rtldm->cfo_threshold &&
		    cfo_ave <= rtldm->cfo_threshold && rtldm->is_freeze == 0) {
			if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) {
				rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
				rtldm->is_freeze = 1;
			} else {
				rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
			}
		}

		/* NOTE(review): the positive branch uses rtldm->* while the
		 * negative branch uses rtlpriv->dm.* -- rtl_dm() aliases the
		 * same structure, so the values match, but the mixed
		 * spelling should be unified.
		 */
		if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
			adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 1) + 1;
		else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) &&
			 rtlpriv->dm.crystal_cap > 0)
			adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 1) - 1;

		if (adjust_xtal != 0) {
			rtldm->is_freeze = 0;
			rtldm->crystal_cap += adjust_xtal;

			/* Clamp to the 6-bit register range. */
			if (rtldm->crystal_cap > 0x3f)
				rtldm->crystal_cap = 0x3f;
			else if (rtldm->crystal_cap < 0)
				rtldm->crystal_cap = 0;

			crystal_cap = rtldm->crystal_cap & 0x3f;
			rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
				      (crystal_cap | (crystal_cap << 6)));
		}

		/* Residual CFO small enough: ATC no longer needed;
		 * otherwise (re-)enable it.
		 */
		if (cfo_ave < CFO_THRESHOLD_ATC &&
		    cfo_ave > -CFO_THRESHOLD_ATC) {
			if (rtldm->atc_status == ATC_STATUS_ON) {
				rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
					      ATC_STATUS_OFF);
				rtldm->atc_status = ATC_STATUS_OFF;
			}
		} else {
			if (rtldm->atc_status == ATC_STATUS_OFF) {
				rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
					      ATC_STATUS_ON);
				rtldm->atc_status = ATC_STATUS_ON;
			}
		}
	}
}
1262
1263static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw)
1264{
1265 struct rtl_priv *rtlpriv = rtl_priv(hw);
1266 struct rtl_sta_info *drv_priv;
1267 u8 cnt = 0;
1268
1269 rtlpriv->dm.one_entry_only = false;
1270
1271 if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
1272 rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
1273 rtlpriv->dm.one_entry_only = true;
1274 return;
1275 }
1276
1277 if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
1278 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
1279 rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
1280 spin_lock_bh(&rtlpriv->locks.entry_list_lock);
1281 list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
1282 cnt++;
1283 }
1284 spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
1285
1286 if (cnt == 1)
1287 rtlpriv->dm.one_entry_only = true;
1288 }
1289}
1290
/* Periodic dynamic-mechanism watchdog.
 *
 * Runs the per-tick DM routines (DIG, EDCCA, CCK CCA threshold, rate
 * adaptation, EDCA turbo, ATC, TX power tracking/dynamic TX power and
 * BT-coex periodic work) only when the RF is on, the firmware is awake
 * and not in LPS, and no RF state change is in progress. The beacon
 * packet counter is reset on every tick regardless.
 */
void rtl8723be_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *)(&fw_current_inpsmode));

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *)(&fw_ps_awake));

	/* During P2P power save, treat the firmware as asleep so the DM
	 * routines below are skipped.
	 */
	if (ppsc->p2p_ps_info.p2p_ps_mode)
		fw_ps_awake = false;

	if ((ppsc->rfpwr_state == ERFON) &&
	    ((!fw_current_inpsmode) && fw_ps_awake) &&
	    (!ppsc->rfchange_inprogress)) {
		rtl8723be_dm_common_info_self_update(hw);
		rtl8723be_dm_false_alarm_counter_statistics(hw);
		rtl8723be_dm_check_rssi_monitor(hw);
		rtl8723be_dm_dig(hw);
		rtl8723be_dm_dynamic_edcca(hw);
		rtl8723be_dm_cck_packet_detection_thresh(hw);
		rtl8723be_dm_refresh_rate_adaptive_mask(hw);
		rtl8723be_dm_check_edca_turbo(hw);
		rtl8723be_dm_dynamic_atc_switch(hw);
		rtl8723be_dm_check_txpower_tracking(hw);
		rtl8723be_dm_dynamic_txpower(hw);
		if (rtlpriv->cfg->ops->get_btc_status())
			rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
	}
	rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
new file mode 100644
index 000000000000..c6c2f2a78a66
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
@@ -0,0 +1,310 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * Contact Information:
15 * wlanfae <wlanfae@realtek.com>
16 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
17 * Hsinchu 300, Taiwan.
18 *
19 * Larry Finger <Larry.Finger@lwfinger.net>
20 *
21 *****************************************************************************/
22
23#ifndef __RTL8723BE_DM_H__
24#define __RTL8723BE_DM_H__
25
26#define MAIN_ANT 0
27#define AUX_ANT 1
28#define MAIN_ANT_CG_TRX 1
29#define AUX_ANT_CG_TRX 0
30#define MAIN_ANT_CGCS_RX 0
31#define AUX_ANT_CGCS_RX 1
32
33#define TXSCALE_TABLE_SIZE 30
34
35/*RF REG LIST*/
36#define DM_REG_RF_MODE_11N 0x00
37#define DM_REG_RF_0B_11N 0x0B
38#define DM_REG_CHNBW_11N 0x18
39#define DM_REG_T_METER_11N 0x24
40#define DM_REG_RF_25_11N 0x25
41#define DM_REG_RF_26_11N 0x26
42#define DM_REG_RF_27_11N 0x27
43#define DM_REG_RF_2B_11N 0x2B
44#define DM_REG_RF_2C_11N 0x2C
45#define DM_REG_RXRF_A3_11N 0x3C
46#define DM_REG_T_METER_92D_11N 0x42
47#define DM_REG_T_METER_88E_11N 0x42
48
49/*BB REG LIST*/
50/*PAGE 8 */
51#define DM_REG_BB_CTRL_11N 0x800
52#define DM_REG_RF_PIN_11N 0x804
53#define DM_REG_PSD_CTRL_11N 0x808
54#define DM_REG_TX_ANT_CTRL_11N 0x80C
55#define DM_REG_BB_PWR_SAV5_11N 0x818
56#define DM_REG_CCK_RPT_FORMAT_11N 0x824
57#define DM_REG_RX_DEFUALT_A_11N 0x858
58#define DM_REG_RX_DEFUALT_B_11N 0x85A
59#define DM_REG_BB_PWR_SAV3_11N 0x85C
60#define DM_REG_ANTSEL_CTRL_11N 0x860
61#define DM_REG_RX_ANT_CTRL_11N 0x864
62#define DM_REG_PIN_CTRL_11N 0x870
63#define DM_REG_BB_PWR_SAV1_11N 0x874
64#define DM_REG_ANTSEL_PATH_11N 0x878
65#define DM_REG_BB_3WIRE_11N 0x88C
66#define DM_REG_SC_CNT_11N 0x8C4
67#define DM_REG_PSD_DATA_11N 0x8B4
68/*PAGE 9*/
69#define DM_REG_ANT_MAPPING1_11N 0x914
70#define DM_REG_ANT_MAPPING2_11N 0x918
71/*PAGE A*/
72#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00
73#define DM_REG_CCK_CCA_11N 0xA0A
74#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
75#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10
76#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14
77#define DM_REG_CCK_FILTER_PARA1_11N 0xA22
78#define DM_REG_CCK_FILTER_PARA2_11N 0xA23
79#define DM_REG_CCK_FILTER_PARA3_11N 0xA24
80#define DM_REG_CCK_FILTER_PARA4_11N 0xA25
81#define DM_REG_CCK_FILTER_PARA5_11N 0xA26
82#define DM_REG_CCK_FILTER_PARA6_11N 0xA27
83#define DM_REG_CCK_FILTER_PARA7_11N 0xA28
84#define DM_REG_CCK_FILTER_PARA8_11N 0xA29
85#define DM_REG_CCK_FA_RST_11N 0xA2C
86#define DM_REG_CCK_FA_MSB_11N 0xA58
87#define DM_REG_CCK_FA_LSB_11N 0xA5C
88#define DM_REG_CCK_CCA_CNT_11N 0xA60
89#define DM_REG_BB_PWR_SAV4_11N 0xA74
90/*PAGE B */
91#define DM_REG_LNA_SWITCH_11N 0xB2C
92#define DM_REG_PATH_SWITCH_11N 0xB30
93#define DM_REG_RSSI_CTRL_11N 0xB38
94#define DM_REG_CONFIG_ANTA_11N 0xB68
95#define DM_REG_RSSI_BT_11N 0xB9C
96/*PAGE C */
97#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
98#define DM_REG_RX_PATH_11N 0xC04
99#define DM_REG_TRMUX_11N 0xC08
100#define DM_REG_OFDM_FA_RSTC_11N 0xC0C
101#define DM_REG_RXIQI_MATRIX_11N 0xC14
102#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
103#define DM_REG_IGI_A_11N 0xC50
104#define DM_REG_ANTDIV_PARA2_11N 0xC54
105#define DM_REG_IGI_B_11N 0xC58
106#define DM_REG_ANTDIV_PARA3_11N 0xC5C
107#define DM_REG_BB_PWR_SAV2_11N 0xC70
108#define DM_REG_RX_OFF_11N 0xC7C
109#define DM_REG_TXIQK_MATRIXA_11N 0xC80
110#define DM_REG_TXIQK_MATRIXB_11N 0xC88
111#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
112#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
113#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
114#define DM_REG_ANTDIV_PARA1_11N 0xCA4
115#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0
116/*PAGE D */
117#define DM_REG_OFDM_FA_RSTD_11N 0xD00
118#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0
119#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4
120#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8
121/*PAGE E */
122#define DM_REG_TXAGC_A_6_18_11N 0xE00
123#define DM_REG_TXAGC_A_24_54_11N 0xE04
124#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08
125#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10
126#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14
127#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18
128#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C
129#define DM_REG_FPGA0_IQK_11N 0xE28
130#define DM_REG_TXIQK_TONE_A_11N 0xE30
131#define DM_REG_RXIQK_TONE_A_11N 0xE34
132#define DM_REG_TXIQK_PI_A_11N 0xE38
133#define DM_REG_RXIQK_PI_A_11N 0xE3C
134#define DM_REG_TXIQK_11N 0xE40
135#define DM_REG_RXIQK_11N 0xE44
136#define DM_REG_IQK_AGC_PTS_11N 0xE48
137#define DM_REG_IQK_AGC_RSP_11N 0xE4C
138#define DM_REG_BLUETOOTH_11N 0xE6C
139#define DM_REG_RX_WAIT_CCA_11N 0xE70
140#define DM_REG_TX_CCK_RFON_11N 0xE74
141#define DM_REG_TX_CCK_BBON_11N 0xE78
142#define DM_REG_OFDM_RFON_11N 0xE7C
143#define DM_REG_OFDM_BBON_11N 0xE80
144#define DM_REG_TX2RX_11N 0xE84
145#define DM_REG_TX2TX_11N 0xE88
146#define DM_REG_RX_CCK_11N 0xE8C
147#define DM_REG_RX_OFDM_11N 0xED0
148#define DM_REG_RX_WAIT_RIFS_11N 0xED4
149#define DM_REG_RX2RX_11N 0xED8
150#define DM_REG_STANDBY_11N 0xEDC
151#define DM_REG_SLEEP_11N 0xEE0
152#define DM_REG_PMPD_ANAEN_11N 0xEEC
153
154/*MAC REG LIST*/
155#define DM_REG_BB_RST_11N 0x02
156#define DM_REG_ANTSEL_PIN_11N 0x4C
157#define DM_REG_EARLY_MODE_11N 0x4D0
158#define DM_REG_RSSI_MONITOR_11N 0x4FE
159#define DM_REG_EDCA_VO_11N 0x500
160#define DM_REG_EDCA_VI_11N 0x504
161#define DM_REG_EDCA_BE_11N 0x508
162#define DM_REG_EDCA_BK_11N 0x50C
163#define DM_REG_TXPAUSE_11N 0x522
164#define DM_REG_RESP_TX_11N 0x6D8
165#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0
166#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4
167
168/*DIG Related*/
169#define DM_BIT_IGI_11N 0x0000007F
170
171#define HAL_DM_DIG_DISABLE BIT(0)
172#define HAL_DM_HIPWR_DISABLE BIT(1)
173
174#define OFDM_TABLE_LENGTH 43
175#define CCK_TABLE_LENGTH 33
176
177#define OFDM_TABLE_SIZE 37
178#define CCK_TABLE_SIZE 33
179
180#define BW_AUTO_SWITCH_HIGH_LOW 25
181#define BW_AUTO_SWITCH_LOW_HIGH 30
182
183#define DM_DIG_THRESH_HIGH 40
184#define DM_DIG_THRESH_LOW 35
185
186#define DM_FALSEALARM_THRESH_LOW 400
187#define DM_FALSEALARM_THRESH_HIGH 1000
188
189#define DM_DIG_MAX 0x3e
190#define DM_DIG_MIN 0x1e
191
192#define DM_DIG_MAX_AP 0x32
193#define DM_DIG_MIN_AP 0x20
194
195#define DM_DIG_FA_UPPER 0x3e
196#define DM_DIG_FA_LOWER 0x1e
197#define DM_DIG_FA_TH0 0x200
198#define DM_DIG_FA_TH1 0x300
199#define DM_DIG_FA_TH2 0x400
200
201#define DM_DIG_BACKOFF_MAX 12
202#define DM_DIG_BACKOFF_MIN -4
203#define DM_DIG_BACKOFF_DEFAULT 10
204
205#define RXPATHSELECTION_DIFF_TH 18
206
207#define DM_RATR_STA_INIT 0
208#define DM_RATR_STA_HIGH 1
209#define DM_RATR_STA_MIDDLE 2
210#define DM_RATR_STA_LOW 3
211
212#define CTS2SELF_THVAL 30
213#define REGC38_TH 20
214
215#define TXHIGHPWRLEVEL_NORMAL 0
216#define TXHIGHPWRLEVEL_LEVEL1 1
217#define TXHIGHPWRLEVEL_LEVEL2 2
218#define TXHIGHPWRLEVEL_BT1 3
219#define TXHIGHPWRLEVEL_BT2 4
220
221#define DM_TYPE_BYFW 0
222#define DM_TYPE_BYDRIVER 1
223
224#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
225#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
226#define TXPWRTRACK_MAX_IDX 6
227
228/* Dynamic ATC switch */
229#define ATC_STATUS_OFF 0x0 /* enable */
230#define ATC_STATUS_ON 0x1 /* disable */
231#define CFO_THRESHOLD_XTAL 10 /* kHz */
232#define CFO_THRESHOLD_ATC 80 /* kHz */
233
/* Fast antenna training (FAT) state machine states. */
enum FAT_STATE {
	FAT_NORMAL_STATE = 0,
	FAT_TRAINING_STATE = 1,
};

/* Operation selector for dynamic initial gain (DIG) configuration. */
enum tag_dynamic_init_gain_operation_type_definition {
	DIG_TYPE_THRESH_HIGH = 0,
	DIG_TYPE_THRESH_LOW = 1,
	DIG_TYPE_BACKOFF = 2,
	DIG_TYPE_RX_GAIN_MIN = 3,
	DIG_TYPE_RX_GAIN_MAX = 4,
	DIG_TYPE_ENABLE = 5,
	DIG_TYPE_DISABLE = 6,
	DIG_OP_TYPE_MAX
};

/* Number of receive chains used for CCA. */
enum dm_1r_cca_e {
	CCA_1R = 0,
	CCA_2R = 1,
	CCA_MAX = 2,
};

/* RF power-save vs. normal operating mode. */
enum dm_rf_e {
	RF_SAVE = 0,
	RF_NORMAL = 1,
	RF_MAX = 2,
};

/* Software antenna-switch selection. */
enum dm_sw_ant_switch_e {
	ANS_ANTENNA_B = 1,
	ANS_ANTENNA_A = 2,
	ANS_ANTENNA_MAX = 3,
};

/* DIG extension-port algorithm stages. */
enum dm_dig_ext_port_alg_e {
	DIG_EXT_PORT_STAGE_0 = 0,
	DIG_EXT_PORT_STAGE_1 = 1,
	DIG_EXT_PORT_STAGE_2 = 2,
	DIG_EXT_PORT_STAGE_3 = 3,
	DIG_EXT_PORT_STAGE_MAX = 4,
};

/* Connection state as seen by the DIG algorithm. */
enum dm_dig_connect_e {
	DIG_STA_DISCONNECT = 0,
	DIG_STA_CONNECT = 1,
	DIG_STA_BEFORE_CONNECT = 2,
	DIG_MULTISTA_DISCONNECT = 3,
	DIG_MULTISTA_CONNECT = 4,
	DIG_CONNECT_MAX
};

/* TX power-tracking compensation method: BB swing table or TX AGC. */
enum pwr_track_control_method {
	BBSWING,
	TXAGC
};

/* BT RSSI state flags -- one bit each within a u32 bitmap. */
#define BT_RSSI_STATE_NORMAL_POWER	BIT_OFFSET_LEN_MASK_32(0, 1)
#define BT_RSSI_STATE_AMDPU_OFF		BIT_OFFSET_LEN_MASK_32(1, 1)
#define BT_RSSI_STATE_SPECIAL_LOW	BIT_OFFSET_LEN_MASK_32(2, 1)
#define BT_RSSI_STATE_BG_EDCA_LOW	BIT_OFFSET_LEN_MASK_32(3, 1)
#define BT_RSSI_STATE_TXPOWER_LOW	BIT_OFFSET_LEN_MASK_32(4, 1)
295
296void rtl8723be_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc,
297 u32 mac_id);
298void rtl8723be_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux,
299 u32 mac_id, u32 rx_pwdb_all);
300void rtl8723be_dm_fast_antenna_trainning_callback(unsigned long data);
301void rtl8723be_dm_init(struct ieee80211_hw *hw);
302void rtl8723be_dm_watchdog(struct ieee80211_hw *hw);
303void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
304void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw);
305void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
306void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
307 u8 *pdirection, u32 *poutwrite_val);
308void rtl8723be_dm_init_edca_turbo(struct ieee80211_hw *hw);
309
310#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
new file mode 100644
index 000000000000..f856be6fc138
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
@@ -0,0 +1,620 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../pci.h"
28#include "../base.h"
29#include "reg.h"
30#include "def.h"
31#include "fw.h"
32#include "../rtl8723com/fw_common.h"
33
34static bool _rtl8723be_check_fw_read_last_h2c(struct ieee80211_hw *hw,
35 u8 boxnum)
36{
37 struct rtl_priv *rtlpriv = rtl_priv(hw);
38 u8 val_hmetfr;
39 bool result = false;
40
41 val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
42 if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
43 result = true;
44 return result;
45}
46
/* Low-level H2C (host-to-card) mailbox write.
 *
 * Serializes against concurrent callers through h2c_lock, waits for the
 * firmware to drain the next mailbox, then writes the command: byte 0
 * is the element id, up to 3 payload bytes go into the main box
 * register, and for 4..7-byte commands the remainder goes into the
 * extension register, which is written first so the firmware sees a
 * complete command when the main box is filled.
 */
static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
					u32 cmd_len, u8 *p_cmdbuffer)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 boxnum;
	u16 box_reg = 0, box_extreg = 0;
	u8 u1b_tmp;
	bool isfw_read = false;
	u8 buf_index = 0;
	bool bwrite_sucess = false;
	u8 wait_h2c_limit = 100;
	u8 wait_writeh2c_limit = 100;
	u8 boxcontent[4], boxextcontent[4];
	u32 h2c_waitcounter = 0;
	unsigned long flag;
	u8 idx;

	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");

	/* Claim the H2C path. If another caller is mid-write, busy-wait
	 * in 100 us steps with the lock dropped, giving up after ~100 ms.
	 */
	while (true) {
		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
		if (rtlhal->h2c_setinprogress) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "H2C set in progress! Wait to set.."
				 "element_id(%d).\n", element_id);

			while (rtlhal->h2c_setinprogress) {
				spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
						       flag);
				h2c_waitcounter++;
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wait 100 us (%d times)...\n",
					 h2c_waitcounter);
				udelay(100);

				if (h2c_waitcounter > 1000)
					return;
				spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
						  flag);
			}
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
		} else {
			rtlhal->h2c_setinprogress = true;
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
			break;
		}
	}
	while (!bwrite_sucess) {
		wait_writeh2c_limit--;
		if (wait_writeh2c_limit == 0) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Write H2C fail because no trigger "
				 "for FW INT!\n");
			break;
		}
		/* Mailboxes are used round-robin (0..3). */
		boxnum = rtlhal->last_hmeboxnum;
		switch (boxnum) {
		case 0:
			box_reg = REG_HMEBOX_0;
			box_extreg = REG_HMEBOX_EXT_0;
			break;
		case 1:
			box_reg = REG_HMEBOX_1;
			box_extreg = REG_HMEBOX_EXT_1;
			break;
		case 2:
			box_reg = REG_HMEBOX_2;
			box_extreg = REG_HMEBOX_EXT_2;
			break;
		case 3:
			box_reg = REG_HMEBOX_3;
			box_extreg = REG_HMEBOX_EXT_3;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}
		/* Poll (10 us steps) until the firmware has consumed the
		 * previous command in this box, up to ~1 ms.
		 */
		isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum);
		while (!isfw_read) {
			wait_h2c_limit--;
			if (wait_h2c_limit == 0) {
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wating too long for FW read "
					 "clear HMEBox(%d)!\n", boxnum);
				break;
			}
			udelay(10);

			isfw_read = _rtl8723be_check_fw_read_last_h2c(hw,
								boxnum);
			u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Wating for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
				 boxnum, u1b_tmp);
		}
		if (!isfw_read) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Write H2C register BOX[%d] fail!!!!! "
				 "Fw do not read.\n", boxnum);
			break;
		}
		memset(boxcontent, 0, sizeof(boxcontent));
		memset(boxextcontent, 0, sizeof(boxextcontent));
		boxcontent[0] = element_id;
		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "Write element_id box_reg(%4x) = %2x\n",
			 box_reg, element_id);

		switch (cmd_len) {
		case 1:
		case 2:
		case 3:
			/* Short command: payload fits after the element id
			 * in the main box.
			 */
			/*boxcontent[0] &= ~(BIT(7));*/
			memcpy((u8 *)(boxcontent) + 1,
			       p_cmdbuffer + buf_index, cmd_len);

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			/* Long command: bytes 3.. go into the extension box,
			 * written before the main box (see function comment).
			 */
			/*boxcontent[0] |= (BIT(7));*/
			memcpy((u8 *)(boxextcontent),
			       p_cmdbuffer + buf_index+3, cmd_len-3);
			memcpy((u8 *)(boxcontent) + 1,
			       p_cmdbuffer + buf_index, 3);

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       boxextcontent[idx]);
			}
			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not process\n");
			break;
		}
		bwrite_sucess = true;

		/* Advance the round-robin mailbox index. */
		rtlhal->last_hmeboxnum = boxnum + 1;
		if (rtlhal->last_hmeboxnum == 4)
			rtlhal->last_hmeboxnum = 0;

		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "pHalData->last_hmeboxnum = %d\n",
			  rtlhal->last_hmeboxnum);
	}
	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
	rtlhal->h2c_setinprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);

	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
210
211void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
212 u32 cmd_len, u8 *p_cmdbuffer)
213{
214 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
215 u32 tmp_cmdbuf[2];
216
217 if (!rtlhal->fw_ready) {
218 RT_ASSERT(false,
219 "return H2C cmd because of Fw download fail!!!\n");
220 return;
221 }
222 memset(tmp_cmdbuf, 0, 8);
223 memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
224 _rtl8723be_fill_h2c_command(hw, element_id, cmd_len,
225 (u8 *)&tmp_cmdbuf);
226 return;
227}
228
/* Build and send the SETPWRMODE H2C command that tells the firmware
 * which LPS power mode to use.
 *
 * @mode: FW_PS_ACTIVE_MODE requests an active RF state; any other mode
 *        requests the firmware RF-off power state.
 */
void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 u1_h2c_set_pwrmode[H2C_8723BE_PWEMODE_LENGTH] = { 0 };
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	u8 rlbm, power_state = 0;
	RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);

	SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
	rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM = 2.*/
	SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
	/* For P2P use the configured smart-PS value; otherwise 1. */
	SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
					 (rtlpriv->mac80211.p2p) ?
					 ppsc->smart_ps : 1);
	SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
					       ppsc->reg_max_lps_awakeintvl);
	SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
	if (mode == FW_PS_ACTIVE_MODE)
		power_state |= FW_PWR_STATE_ACTIVE;
	else
		power_state |= FW_PWR_STATE_RF_OFF;
	SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);

	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
		      "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
		      u1_h2c_set_pwrmode, H2C_8723BE_PWEMODE_LENGTH);
	rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_SETPWRMODE,
			       H2C_8723BE_PWEMODE_LENGTH,
			       u1_h2c_set_pwrmode);
}
259
/* Transmit a command packet (reserved pages) through the beacon queue.
 *
 * Any skb still queued on the beacon ring is dropped first, then @skb
 * is filled into descriptor 0 and TX polling is kicked. Always returns
 * true; callers' status checks are therefore nominal.
 */
static bool _rtl8723be_cmd_send_packet(struct ieee80211_hw *hw,
				       struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	struct sk_buff *pskb = NULL;
	u8 own;
	unsigned long flags;

	ring = &rtlpci->tx_ring[BEACON_QUEUE];

	/* Discard a previously queued beacon-ring skb, if any. */
	pskb = __skb_dequeue(&ring->queue);
	if (pskb)
		kfree_skb(pskb);

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	pdesc = &ring->desc[0];
	/* NOTE(review): 'own' is read but never checked -- a descriptor
	 * still owned by hardware would be overwritten; confirm whether an
	 * OWN-bit check was intended here.
	 */
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);

	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);

	__skb_queue_tail(&ring->queue, skb);

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);

	return true;
}
292#define BEACON_PG 0 /* ->1 */
293#define PSPOLL_PG 2
294#define NULL_PG 3
295#define PROBERSP_PG 4 /* ->5 */
296
297#define TOTAL_RESERVED_PKT_LEN 768
298
299static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
300 /* page 0 beacon */
301 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
302 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
303 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00,
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
305 0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65,
306 0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B,
307 0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06,
308 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32,
309 0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C,
310 0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C,
314 0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50,
315 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04,
316 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00,
317
318 /* page 1 beacon */
319 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
335
336 /* page 2 ps-poll */
337 0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B,
338 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
348 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
353
354 /* page 3 null */
355 0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B,
356 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
357 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00,
358 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
366 0x72, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
371
372 /* page 4 probe_resp */
373 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
374 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
375 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
376 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
377 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
378 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
379 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
380 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
381 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
382 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
383 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
386 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
387 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
389
390 /* page 5 probe_resp */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
407};
408
409void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
410 bool dl_finished)
411{
412 struct rtl_priv *rtlpriv = rtl_priv(hw);
413 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
414 struct sk_buff *skb = NULL;
415
416 u32 totalpacketlen;
417 bool rtstatus;
418 u8 u1rsvdpageloc[5] = { 0 };
419 bool dlok = false;
420
421 u8 *beacon;
422 u8 *p_pspoll;
423 u8 *nullfunc;
424 u8 *p_probersp;
425 /*---------------------------------------------------------
426 * (1) beacon
427 *---------------------------------------------------------
428 */
429 beacon = &reserved_page_packet[BEACON_PG * 128];
430 SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
431 SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
432
433 /*-------------------------------------------------------
434 * (2) ps-poll
435 *-------------------------------------------------------
436 */
437 p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
438 SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
439 SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
440 SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
441
442 SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
443
444 /*--------------------------------------------------------
445 * (3) null data
446 *--------------------------------------------------------
447 */
448 nullfunc = &reserved_page_packet[NULL_PG * 128];
449 SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
450 SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
451 SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
452
453 SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
454
455 /*---------------------------------------------------------
456 * (4) probe response
457 *---------------------------------------------------------
458 */
459 p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
460 SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
461 SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
462 SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
463
464 SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
465
466 totalpacketlen = TOTAL_RESERVED_PKT_LEN;
467
468 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
469 "rtl8723be_set_fw_rsvdpagepkt(): "
470 "HW_VAR_SET_TX_CMD: ALL\n",
471 &reserved_page_packet[0], totalpacketlen);
472 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
473 "rtl8723be_set_fw_rsvdpagepkt(): "
474 "HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3);
475
476
477 skb = dev_alloc_skb(totalpacketlen);
478 memcpy((u8 *)skb_put(skb, totalpacketlen),
479 &reserved_page_packet, totalpacketlen);
480
481 rtstatus = _rtl8723be_cmd_send_packet(hw, skb);
482
483 if (rtstatus)
484 dlok = true;
485
486 if (dlok) {
487 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
488 "Set RSVD page location to Fw.\n");
489 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
490 u1rsvdpageloc, 3);
491 rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RSVDPAGE,
492 sizeof(u1rsvdpageloc), u1rsvdpageloc);
493 } else {
494 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
495 "Set RSVD page location to Fw FAIL!!!!!!.\n");
496 }
497}
498
499/*Should check FW support p2p or not.*/
500static void rtl8723be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw,
501 u8 ctwindow)
502{
503 u8 u1_ctwindow_period[1] = {ctwindow};
504
505 rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_CTW_CMD, 1,
506 u1_ctwindow_period);
507}
508
/* Program the P2P power-save offload state into hardware and firmware.
 *
 * Depending on @p2p_ps_state this clears the cached offload descriptor
 * (DISABLE), programs CTWindow and up to two NoA (Notice of Absence)
 * register sets (ENABLE), or toggles the discovery flag (SCAN /
 * SCAN_DONE).  In every case the cached p2p_ps_offload struct is then
 * pushed to the firmware with an H2C command.
 */
void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
				      u8 p2p_ps_state)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
	struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
	u8 i;
	u16 ctwindow;
	u32 start_time, tsf_low;

	switch (p2p_ps_state) {
	case P2P_PS_DISABLE:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
		/* Wipe the cached descriptor; the cleared struct is sent
		 * to firmware at the bottom of this function.
		 */
		memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
		break;
	case P2P_PS_ENABLE:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
		/* update CTWindow value. */
		if (p2pinfo->ctwindow > 0) {
			p2p_ps_offload->ctwindow_en = 1;
			ctwindow = p2pinfo->ctwindow;
			rtl8723be_set_p2p_ctw_period_cmd(hw, ctwindow);
		}
		/* hw only support 2 set of NoA */
		for (i = 0; i < p2pinfo->noa_num; i++) {
			/* To control the register setting
			 * for which NOA
			 */
			rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
			if (i == 0)
				p2p_ps_offload->noa0_en = 1;
			else
				p2p_ps_offload->noa1_en = 1;

			/* config P2P NoA Descriptor Register */
			rtl_write_dword(rtlpriv, 0x5E0,
					p2pinfo->noa_duration[i]);
			rtl_write_dword(rtlpriv, 0x5E4,
					p2pinfo->noa_interval[i]);

			/*Get Current TSF value */
			tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);

			start_time = p2pinfo->noa_start_time[i];
			if (p2pinfo->noa_count_type[i] != 1) {
				/* Advance the NoA start time until it is at
				 * least 50 * 1024 TSF units past the current
				 * TSF (~50 ms assuming microsecond TSF ticks
				 * - TODO confirm), decrementing the NoA count
				 * each step unless it is 255 (treated as
				 * do-not-decrement).
				 */
				while (start_time <= (tsf_low + (50 * 1024))) {
					start_time += p2pinfo->noa_interval[i];
					if (p2pinfo->noa_count_type[i] != 255)
						p2pinfo->noa_count_type[i]--;
				}
			}
			rtl_write_dword(rtlpriv, 0x5E8, start_time);
			rtl_write_dword(rtlpriv, 0x5EC,
					p2pinfo->noa_count_type[i]);
		}
		if ((p2pinfo->opp_ps == 1) ||
		    (p2pinfo->noa_num > 0)) {
			/* rst p2p circuit */
			rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));

			p2p_ps_offload->offload_en = 1;

			/* Role bit: 1 when we are the P2P group owner. */
			if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
				p2p_ps_offload->role = 1;
				p2p_ps_offload->allstasleep = 0;
			} else {
				p2p_ps_offload->role = 0;
			}
			p2p_ps_offload->discovery = 0;
		}
		break;
	case P2P_PS_SCAN:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
		p2p_ps_offload->discovery = 1;
		break;
	case P2P_PS_SCAN_DONE:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
		p2p_ps_offload->discovery = 0;
		p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
		break;
	default:
		break;
	}
	/* Always (re)send the cached offload descriptor to firmware. */
	rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_OFFLOAD, 1,
			       (u8 *)p2p_ps_offload);
}
597
598void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
599{
600 u8 u1_joinbssrpt_parm[1] = { 0 };
601
602 SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
603
604 rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_JOINBSSRPT, 1,
605 u1_joinbssrpt_parm);
606}
607
608void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
609 u8 ap_offload_enable)
610{
611 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
612 u8 u1_apoffload_parm[H2C_8723BE_AP_OFFLOAD_LENGTH] = { 0 };
613
614 SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
615 SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid);
616 SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
617
618 rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_AP_OFFLOAD,
619 H2C_8723BE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
620}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
new file mode 100644
index 000000000000..31eec281e446
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
@@ -0,0 +1,248 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 * Larry Finger <Larry.Finger@lwfinger.net>
22 *
23 *****************************************************************************/
24
25#ifndef __RTL8723BE__FW__H__
26#define __RTL8723BE__FW__H__
27
28#define FW_8192C_SIZE 0x8000
29#define FW_8192C_START_ADDRESS 0x1000
30#define FW_8192C_END_ADDRESS 0x5FFF
31#define FW_8192C_PAGE_SIZE 4096
32#define FW_8192C_POLLING_DELAY 5
33#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
34
35#define IS_FW_HEADER_EXIST(_pfwhdr) \
36 ((_pfwhdr->signature&0xFFF0) == 0x5300)
37#define USE_OLD_WOWLAN_DEBUG_FW 0
38
39#define H2C_8723BE_RSVDPAGE_LOC_LEN 5
40#define H2C_8723BE_PWEMODE_LENGTH 5
41#define H2C_8723BE_JOINBSSRPT_LENGTH 1
42#define H2C_8723BE_AP_OFFLOAD_LENGTH 3
43#define H2C_8723BE_WOWLAN_LENGTH 3
44#define H2C_8723BE_KEEP_ALIVE_CTRL_LENGTH 3
45#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
46#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 1
47#else
48#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 3
49#endif
50#define H2C_8723BE_AOAC_GLOBAL_INFO_LEN 2
51#define H2C_8723BE_AOAC_RSVDPAGE_LOC_LEN 7
52
53
54/* Fw PS state for RPWM.
55*BIT[2:0] = HW state
56*BIT[3] = Protocol PS state, 1: register active state , 0: register sleep state
57*BIT[4] = sub-state
58*/
59#define FW_PS_GO_ON BIT(0)
60#define FW_PS_TX_NULL BIT(1)
61#define FW_PS_RF_ON BIT(2)
62#define FW_PS_REGISTER_ACTIVE BIT(3)
63
64#define FW_PS_DPS BIT(0)
65#define FW_PS_LCLK (FW_PS_DPS)
66#define FW_PS_RF_OFF BIT(1)
67#define FW_PS_ALL_ON BIT(2)
68#define FW_PS_ST_ACTIVE BIT(3)
69#define FW_PS_ISR_ENABLE BIT(4)
70#define FW_PS_IMR_ENABLE BIT(5)
71
72
73#define FW_PS_ACK BIT(6)
74#define FW_PS_TOGGLE BIT(7)
75
76 /* 88E RPWM value*/
77 /* BIT[0] = 1: 32k, 0: 40M*/
78#define FW_PS_CLOCK_OFF BIT(0) /* 32k*/
79#define FW_PS_CLOCK_ON 0 /*40M*/
80
81#define FW_PS_STATE_MASK (0x0F)
82#define FW_PS_STATE_HW_MASK (0x07)
83/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
84#define FW_PS_STATE_INT_MASK (0x3F)
85
86#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
87#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x))
88#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x))
89#define FW_PS_ISR_VAL(x) ((x) & 0x70)
90#define FW_PS_IMR_MASK(x) ((x) & 0xDF)
91#define FW_PS_KEEP_IMR(x) ((x) & 0x20)
92
93
94#define FW_PS_STATE_S0 (FW_PS_DPS)
95#define FW_PS_STATE_S1 (FW_PS_LCLK)
96#define FW_PS_STATE_S2 (FW_PS_RF_OFF)
97#define FW_PS_STATE_S3 (FW_PS_ALL_ON)
98#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
99
100/* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
101#define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON)
102/* (FW_PS_RF_ON)*/
103#define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON)
104/* 0x0*/
105#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON)
106/* (FW_PS_STATE_RF_OFF)*/
107#define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF)
108
109#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4)
110#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3)
111#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2)
112#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1)
113
114
115/* For 88E H2C PwrMode Cmd ID 5.*/
116#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
117#define FW_PWR_STATE_RF_OFF 0
118
119#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK)
120#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON))
121#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON))
122#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE))
123#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40)
124
125#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
126
127#define IS_IN_LOW_POWER_STATE_88E(fwpsstate) \
128 (FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF)
129
130#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
131#define FW_PWR_STATE_RF_OFF 0
132
133#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
134
135#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__ph2ccmd, __val) \
136 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
137#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__ph2ccmd, __val) \
138 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
139#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__ph2ccmd, __val) \
140 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
141#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__ph2ccmd, __val) \
142 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
143#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__ph2ccmd, __val) \
144 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 4, 1, __val)
145#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__ph2ccmd, __val) \
146 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 5, 1, __val)
147#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__ph2ccmd, __val) \
148 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 6, 1, __val)
149#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__ph2ccmd, __val) \
150 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val)
151#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__ph2ccmd, __val) \
152 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
153#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__ph2ccmd, __val) \
154 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
155
156
157#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
158 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
159#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
160 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
161#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
162 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
163#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
164 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
165#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
166 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
167#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
168 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
169#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
170 LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
171
172#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
173 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
174#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
175 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
176#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
177 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
178#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
179 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
180
181/* AP_OFFLOAD */
182#define SET_H2CCMD_AP_OFFLOAD_ON(__ph2ccmd, __val) \
183 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
184#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__ph2ccmd, __val) \
185 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
186#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__ph2ccmd, __val) \
187 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
188#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__ph2ccmd, __val) \
189 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
190
191/* Keep Alive Control*/
192#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__ph2ccmd, __val) \
193 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
194#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__ph2ccmd, __val)\
195 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
196#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__ph2ccmd, __val) \
197 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
198
199/*REMOTE_WAKE_CTRL */
200#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__ph2ccmd, __val) \
201 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
202#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
203#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__ph2ccmd, __val)\
204 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
205#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__ph2ccmd, __val)\
206 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
207#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__ph2ccmd, __val)\
208 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
209#else
210#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
211 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
212#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__ph2ccmd, __val) \
213 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
214#endif
215
216/* GTK_OFFLOAD */
217#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
218 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
219#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__ph2ccmd, __val) \
220 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
221
222/* AOAC_RSVDPAGE_LOC */
223#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_REM_WAKE_CTRL_INFO(__ph2ccmd, __val)\
224 SET_BITS_TO_LE_1BYTE((__ph2ccmd), 0, 8, __val)
225#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__ph2ccmd, __val) \
226 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
227#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__ph2ccmd, __val) \
228 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
229#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__ph2ccmd, __val) \
230 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
231#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__ph2ccmd, __val) \
232 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
233
234void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
235void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
236 u8 ap_offload_enable);
237void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
238 u32 cmd_len, u8 *p_cmdbuffer);
239void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
240void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
241 bool dl_finished);
242void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
243int rtl8723be_download_fw(struct ieee80211_hw *hw,
244 bool buse_wake_on_wlan_fw);
245void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
246 u8 p2p_ps_state);
247
248#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
new file mode 100644
index 000000000000..0fdf0909321f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -0,0 +1,2523 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../efuse.h"
28#include "../base.h"
29#include "../regd.h"
30#include "../cam.h"
31#include "../ps.h"
32#include "../pci.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "dm.h"
37#include "../rtl8723com/dm_common.h"
38#include "fw.h"
39#include "../rtl8723com/fw_common.h"
40#include "led.h"
41#include "hw.h"
42#include "pwrseq.h"
43#include "../btcoexist/rtl_btc.h"
44
45#define LLT_CONFIG 5
46
47static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
48{
49 struct rtl_priv *rtlpriv = rtl_priv(hw);
50 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
51 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
52
53 while (skb_queue_len(&ring->queue)) {
54 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
55 struct sk_buff *skb = __skb_dequeue(&ring->queue);
56
57 pci_unmap_single(rtlpci->pdev,
58 rtlpriv->cfg->ops->get_desc(
59 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
60 skb->len, PCI_DMA_TODEVICE);
61 kfree_skb(skb);
62 ring->idx = (ring->idx + 1) % ring->entries;
63 }
64}
65
66static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
67 u8 set_bits, u8 clear_bits)
68{
69 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
70 struct rtl_priv *rtlpriv = rtl_priv(hw);
71
72 rtlpci->reg_bcn_ctrl_val |= set_bits;
73 rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
74
75 rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
76}
77
78static void _rtl8723be_stop_tx_beacon(struct ieee80211_hw *hw)
79{
80 struct rtl_priv *rtlpriv = rtl_priv(hw);
81 u8 tmp1byte;
82
83 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
84 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
85 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
86 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
87 tmp1byte &= ~(BIT(0));
88 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
89}
90
91static void _rtl8723be_resume_tx_beacon(struct ieee80211_hw *hw)
92{
93 struct rtl_priv *rtlpriv = rtl_priv(hw);
94 u8 tmp1byte;
95
96 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
97 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
98 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
99 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
100 tmp1byte |= BIT(1);
101 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
102}
103
/* Enable the beacon sub-function by clearing BIT(1) of REG_BCN_CTRL. */
static void _rtl8723be_enable_bcn_sub_func(struct ieee80211_hw *hw)
{
	_rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(1));
}
108
/* Disable the beacon sub-function by setting BIT(1) of REG_BCN_CTRL. */
static void _rtl8723be_disable_bcn_sub_func(struct ieee80211_hw *hw)
{
	_rtl8723be_set_bcn_ctrl_reg(hw, BIT(1), 0);
}
113
/* Pull the hardware clock back on while firmware-controlled power save
 * is active.
 *
 * Waits (up to ~100 ms, polled in 100 us steps) for any concurrent
 * clock-change transaction to finish, then, if the firmware is in the
 * 32k low-power state, issues the RPWM request and polls REG_HISR for
 * the CPWM interrupt confirming the wakeup.  When
 * @need_turn_off_ckk is set, the clock-off timer is re-armed 10 ms out
 * so the clock is gated off again once TX queues drain.
 */
static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
				       bool need_turn_off_ckk)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	bool support_remote_wake_up;
	u32 count = 0, isr_regaddr, content;
	bool schedule_timer = need_turn_off_ckk;
	rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
				      (u8 *)(&support_remote_wake_up));

	/* Nothing to do unless firmware is up and in FW power save. */
	if (!rtlhal->fw_ready)
		return;
	if (!rtlpriv->psc.fw_current_inpsmode)
		return;

	/* Wait for any in-flight clock change; give up after ~100 ms.
	 * NOTE(review): the else-branch assigns `false` where one would
	 * expect `true` to claim the transition; the same pattern appears
	 * in sibling rtlwifi drivers - confirm before changing.
	 */
	while (1) {
		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
		if (rtlhal->fw_clk_change_in_progress) {
			while (rtlhal->fw_clk_change_in_progress) {
				spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
				count++;
				udelay(100);
				if (count > 1000)
					return;
				spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
			}
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
		} else {
			rtlhal->fw_clk_change_in_progress = false;
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
			break;
		}
	}
	if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
		/* NOTE(review): get_hw_reg with HW_VAR_SET_RPWM looks like
		 * it was meant to be set_hw_reg - verify against other
		 * rtlwifi variants before changing.
		 */
		rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
					      &rpwm_val);
		if (FW_PS_IS_ACK(rpwm_val)) {
			isr_regaddr = REG_HISR;
			content = rtl_read_dword(rtlpriv, isr_regaddr);
			/* Poll for the CPWM interrupt bit; `count` is
			 * reused from the wait loop above, shortening
			 * this poll if we already waited there.
			 */
			while (!(content & IMR_CPWM) && (count < 500)) {
				udelay(50);
				count++;
				content = rtl_read_dword(rtlpriv, isr_regaddr);
			}

			if (content & IMR_CPWM) {
				/* Ack the CPWM interrupt and record RF on. */
				rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
				rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
				RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
					 "Receive CPWM INT!!! Set "
					 "pHalData->FwPSState = %X\n",
					 rtlhal->fw_ps_state);
			}
		}
		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
		rtlhal->fw_clk_change_in_progress = false;
		spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
		if (schedule_timer) {
			/* Re-arm the clock-off timer 10 ms out. */
			mod_timer(&rtlpriv->works.fw_clockoff_timer,
				  jiffies + MSECS(10));
		}
	} else {
		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
		rtlhal->fw_clk_change_in_progress = false;
		spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
	}
}
182
/* Gate the hardware clock off (enter the 32k low-power state) via an
 * RPWM write.
 *
 * Bails out unless firmware is ready, FW power save is active, software
 * is allowed to change the clock, and RF is not (about to be) off.  If
 * any TX ring still holds frames, the attempt is deferred 10 ms via the
 * clock-off timer instead of gating the clock under pending traffic.
 */
static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring;
	enum rf_pwrstate rtstate;
	bool schedule_timer = false;
	u8 queue;

	if (!rtlhal->fw_ready)
		return;
	if (!rtlpriv->psc.fw_current_inpsmode)
		return;
	if (!rtlhal->allow_sw_to_change_hwclc)
		return;
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
	if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
		return;

	/* Defer if any TX queue still has frames pending. */
	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
		ring = &rtlpci->tx_ring[queue];
		if (skb_queue_len(&ring->queue)) {
			schedule_timer = true;
			break;
		}
	}
	if (schedule_timer) {
		mod_timer(&rtlpriv->works.fw_clockoff_timer,
			  jiffies + MSECS(10));
		return;
	}
	if (FW_PS_STATE(rtlhal->fw_ps_state) !=
	    FW_PS_STATE_RF_OFF_LOW_PWR_88E) {
		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
		if (!rtlhal->fw_clk_change_in_progress) {
			/* Claim the transition, then drop the lock for the
			 * register writes (they may sleep/are slow).
			 */
			rtlhal->fw_clk_change_in_progress = true;
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
			rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
			/* Ack any stale CPWM interrupt before sleeping. */
			rtl_write_word(rtlpriv, REG_HISR, 0x0100);
			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
						      &rpwm_val);
			spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
			rtlhal->fw_clk_change_in_progress = false;
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
		} else {
			/* Someone else is mid-transition; retry in 10 ms. */
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
			mod_timer(&rtlpriv->works.fw_clockoff_timer,
				  jiffies + MSECS(10));
		}
	}
}
235
236static void _rtl8723be_set_fw_ps_rf_on(struct ieee80211_hw *hw)
237{
238 u8 rpwm_val = 0;
239 rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK);
240 _rtl8723be_set_fw_clock_on(hw, rpwm_val, true);
241}
242
243static void _rtl8723be_fwlps_leave(struct ieee80211_hw *hw)
244{
245 struct rtl_priv *rtlpriv = rtl_priv(hw);
246 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
247 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
248 bool fw_current_inps = false;
249 u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
250
251 if (ppsc->low_power_enable) {
252 rpwm_val = (FW_PS_STATE_ALL_ON_88E | FW_PS_ACK);/* RF on */
253 _rtl8723be_set_fw_clock_on(hw, rpwm_val, false);
254 rtlhal->allow_sw_to_change_hwclc = false;
255 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
256 &fw_pwrmode);
257 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
258 (u8 *)(&fw_current_inps));
259 } else {
260 rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
261 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
262 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
263 &fw_pwrmode);
264 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
265 (u8 *)(&fw_current_inps));
266 }
267}
268
269static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw)
270{
271 struct rtl_priv *rtlpriv = rtl_priv(hw);
272 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
273 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
274 bool fw_current_inps = true;
275 u8 rpwm_val;
276
277 if (ppsc->low_power_enable) {
278 rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */
279 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
280 (u8 *)(&fw_current_inps));
281 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
282 &ppsc->fwctrl_psmode);
283 rtlhal->allow_sw_to_change_hwclc = true;
284 _rtl8723be_set_fw_clock_off(hw, rpwm_val);
285
286 } else {
287 rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */
288 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
289 (u8 *)(&fw_current_inps));
290 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
291 &ppsc->fwctrl_psmode);
292 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
293 }
294}
295
296void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
297{
298 struct rtl_priv *rtlpriv = rtl_priv(hw);
299 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
300 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
301
302 switch (variable) {
303 case HW_VAR_RCR:
304 *((u32 *)(val)) = rtlpci->receive_config;
305 break;
306 case HW_VAR_RF_STATE:
307 *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
308 break;
309 case HW_VAR_FWLPS_RF_ON: {
310 enum rf_pwrstate rfstate;
311 u32 val_rcr;
312
313 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
314 (u8 *)(&rfstate));
315 if (rfstate == ERFOFF) {
316 *((bool *)(val)) = true;
317 } else {
318 val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
319 val_rcr &= 0x00070000;
320 if (val_rcr)
321 *((bool *)(val)) = false;
322 else
323 *((bool *)(val)) = true;
324 }
325 break; }
326 case HW_VAR_FW_PSMODE_STATUS:
327 *((bool *)(val)) = ppsc->fw_current_inpsmode;
328 break;
329 case HW_VAR_CORRECT_TSF: {
330 u64 tsf;
331 u32 *ptsf_low = (u32 *)&tsf;
332 u32 *ptsf_high = ((u32 *)&tsf) + 1;
333
334 *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
335 *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
336
337 *((u64 *)(val)) = tsf;
338
339 break; }
340 default:
341 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
342 "switch case not process %x\n", variable);
343 break;
344 }
345}
346
347void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
348{
349 struct rtl_priv *rtlpriv = rtl_priv(hw);
350 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
351 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
352 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
353 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
354 u8 idx;
355
356 switch (variable) {
357 case HW_VAR_ETHER_ADDR:
358 for (idx = 0; idx < ETH_ALEN; idx++)
359 rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]);
360 break;
361 case HW_VAR_BASIC_RATE: {
362 u16 rate_cfg = ((u16 *)val)[0];
363 u8 rate_index = 0;
364 rate_cfg = rate_cfg & 0x15f;
365 rate_cfg |= 0x01;
366 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
367 rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff);
368 while (rate_cfg > 0x1) {
369 rate_cfg = (rate_cfg >> 1);
370 rate_index++;
371 }
372 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index);
373 break; }
374 case HW_VAR_BSSID:
375 for (idx = 0; idx < ETH_ALEN; idx++)
376 rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]);
377 break;
378 case HW_VAR_SIFS:
379 rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
380 rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
381
382 rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
383 rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
384
385 if (!mac->ht_enable)
386 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e);
387 else
388 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
389 *((u16 *)val));
390 break;
391 case HW_VAR_SLOT_TIME: {
392 u8 e_aci;
393
394 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
395 "HW_VAR_SLOT_TIME %x\n", val[0]);
396
397 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
398
399 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
400 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
401 &e_aci);
402 }
403 break; }
404 case HW_VAR_ACK_PREAMBLE: {
405 u8 reg_tmp;
406 u8 short_preamble = (bool)*val;
407 reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL + 2);
408 if (short_preamble) {
409 reg_tmp |= 0x02;
410 rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
411 } else {
412 reg_tmp &= 0xFD;
413 rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
414 }
415 break; }
416 case HW_VAR_WPA_CONFIG:
417 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
418 break;
419 case HW_VAR_AMPDU_MIN_SPACE: {
420 u8 min_spacing_to_set;
421 u8 sec_min_space;
422
423 min_spacing_to_set = *val;
424 if (min_spacing_to_set <= 7) {
425 sec_min_space = 0;
426
427 if (min_spacing_to_set < sec_min_space)
428 min_spacing_to_set = sec_min_space;
429
430 mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
431 min_spacing_to_set);
432
433 *val = min_spacing_to_set;
434
435 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
436 "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
437 mac->min_space_cfg);
438
439 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
440 mac->min_space_cfg);
441 }
442 break; }
443 case HW_VAR_SHORTGI_DENSITY: {
444 u8 density_to_set;
445
446 density_to_set = *val;
447 mac->min_space_cfg |= (density_to_set << 3);
448
449 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
450 "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
451 mac->min_space_cfg);
452
453 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
454 mac->min_space_cfg);
455 break; }
456 case HW_VAR_AMPDU_FACTOR: {
457 u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
458 u8 factor_toset;
459 u8 *p_regtoset = NULL;
460 u8 index = 0;
461
462 p_regtoset = regtoset_normal;
463
464 factor_toset = *val;
465 if (factor_toset <= 3) {
466 factor_toset = (1 << (factor_toset + 2));
467 if (factor_toset > 0xf)
468 factor_toset = 0xf;
469
470 for (index = 0; index < 4; index++) {
471 if ((p_regtoset[index] & 0xf0) >
472 (factor_toset << 4))
473 p_regtoset[index] =
474 (p_regtoset[index] & 0x0f) |
475 (factor_toset << 4);
476
477 if ((p_regtoset[index] & 0x0f) > factor_toset)
478 p_regtoset[index] =
479 (p_regtoset[index] & 0xf0) |
480 (factor_toset);
481
482 rtl_write_byte(rtlpriv,
483 (REG_AGGLEN_LMT + index),
484 p_regtoset[index]);
485 }
486 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
487 "Set HW_VAR_AMPDU_FACTOR: %#x\n",
488 factor_toset);
489 }
490 break; }
491 case HW_VAR_AC_PARAM: {
492 u8 e_aci = *val;
493 rtl8723_dm_init_edca_turbo(hw);
494
495 if (rtlpci->acm_method != EACMWAY2_SW)
496 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
497 &e_aci);
498 break; }
499 case HW_VAR_ACM_CTRL: {
500 u8 e_aci = *val;
501 union aci_aifsn *p_aci_aifsn =
502 (union aci_aifsn *)(&(mac->ac[0].aifs));
503 u8 acm = p_aci_aifsn->f.acm;
504 u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
505
506 acm_ctrl =
507 acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
508
509 if (acm) {
510 switch (e_aci) {
511 case AC0_BE:
512 acm_ctrl |= ACMHW_BEQEN;
513 break;
514 case AC2_VI:
515 acm_ctrl |= ACMHW_VIQEN;
516 break;
517 case AC3_VO:
518 acm_ctrl |= ACMHW_VOQEN;
519 break;
520 default:
521 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
522 "HW_VAR_ACM_CTRL acm set "
523 "failed: eACI is %d\n", acm);
524 break;
525 }
526 } else {
527 switch (e_aci) {
528 case AC0_BE:
529 acm_ctrl &= (~ACMHW_BEQEN);
530 break;
531 case AC2_VI:
532 acm_ctrl &= (~ACMHW_VIQEN);
533 break;
534 case AC3_VO:
535 acm_ctrl &= (~ACMHW_BEQEN);
536 break;
537 default:
538 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
539 "switch case not process\n");
540 break;
541 }
542 }
543 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
544 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
545 "Write 0x%X\n", acm_ctrl);
546 rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
547 break; }
548 case HW_VAR_RCR:
549 rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]);
550 rtlpci->receive_config = ((u32 *)(val))[0];
551 break;
552 case HW_VAR_RETRY_LIMIT: {
553 u8 retry_limit = *val;
554
555 rtl_write_word(rtlpriv, REG_RL,
556 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
557 retry_limit << RETRY_LIMIT_LONG_SHIFT);
558 break; }
559 case HW_VAR_DUAL_TSF_RST:
560 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
561 break;
562 case HW_VAR_EFUSE_BYTES:
563 rtlefuse->efuse_usedbytes = *((u16 *)val);
564 break;
565 case HW_VAR_EFUSE_USAGE:
566 rtlefuse->efuse_usedpercentage = *val;
567 break;
568 case HW_VAR_IO_CMD:
569 rtl8723be_phy_set_io_cmd(hw, (*(enum io_type *)val));
570 break;
571 case HW_VAR_SET_RPWM: {
572 u8 rpwm_val;
573
574 rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
575 udelay(1);
576
577 if (rpwm_val & BIT(7)) {
578 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
579 } else {
580 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
581 }
582 break; }
583 case HW_VAR_H2C_FW_PWRMODE:
584 rtl8723be_set_fw_pwrmode_cmd(hw, *val);
585 break;
586 case HW_VAR_FW_PSMODE_STATUS:
587 ppsc->fw_current_inpsmode = *((bool *)val);
588 break;
589 case HW_VAR_RESUME_CLK_ON:
590 _rtl8723be_set_fw_ps_rf_on(hw);
591 break;
592 case HW_VAR_FW_LPS_ACTION: {
593 bool enter_fwlps = *((bool *)val);
594
595 if (enter_fwlps)
596 _rtl8723be_fwlps_enter(hw);
597 else
598 _rtl8723be_fwlps_leave(hw);
599
600 break; }
601 case HW_VAR_H2C_FW_JOINBSSRPT: {
602 u8 mstatus = *val;
603 u8 tmp_regcr, tmp_reg422, bcnvalid_reg;
604 u8 count = 0, dlbcn_count = 0;
605 bool recover = false;
606
607 if (mstatus == RT_MEDIA_CONNECT) {
608 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
609
610 tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
611 rtl_write_byte(rtlpriv, REG_CR + 1,
612 (tmp_regcr | BIT(0)));
613
614 _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
615 _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
616
617 tmp_reg422 = rtl_read_byte(rtlpriv,
618 REG_FWHW_TXQ_CTRL + 2);
619 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
620 tmp_reg422 & (~BIT(6)));
621 if (tmp_reg422 & BIT(6))
622 recover = true;
623
624 do {
625 bcnvalid_reg = rtl_read_byte(rtlpriv,
626 REG_TDECTRL + 2);
627 rtl_write_byte(rtlpriv, REG_TDECTRL + 2,
628 (bcnvalid_reg | BIT(0)));
629 _rtl8723be_return_beacon_queue_skb(hw);
630
631 rtl8723be_set_fw_rsvdpagepkt(hw, 0);
632 bcnvalid_reg = rtl_read_byte(rtlpriv,
633 REG_TDECTRL + 2);
634 count = 0;
635 while (!(bcnvalid_reg & BIT(0)) && count < 20) {
636 count++;
637 udelay(10);
638 bcnvalid_reg = rtl_read_byte(rtlpriv,
639 REG_TDECTRL + 2);
640 }
641 dlbcn_count++;
642 } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
643
644 if (bcnvalid_reg & BIT(0))
645 rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
646
647 _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
648 _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
649
650 if (recover) {
651 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
652 tmp_reg422);
653 }
654 rtl_write_byte(rtlpriv, REG_CR + 1,
655 (tmp_regcr & ~(BIT(0))));
656 }
657 rtl8723be_set_fw_joinbss_report_cmd(hw, *val);
658 break; }
659 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
660 rtl8723be_set_p2p_ps_offload_cmd(hw, *val);
661 break;
662 case HW_VAR_AID: {
663 u16 u2btmp;
664 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
665 u2btmp &= 0xC000;
666 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
667 (u2btmp | mac->assoc_id));
668 break; }
669 case HW_VAR_CORRECT_TSF: {
670 u8 btype_ibss = *val;
671
672 if (btype_ibss)
673 _rtl8723be_stop_tx_beacon(hw);
674
675 _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
676
677 rtl_write_dword(rtlpriv, REG_TSFTR,
678 (u32) (mac->tsf & 0xffffffff));
679 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
680 (u32) ((mac->tsf >> 32) & 0xffffffff));
681
682 _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
683
684 if (btype_ibss)
685 _rtl8723be_resume_tx_beacon(hw);
686 break; }
687 case HW_VAR_KEEP_ALIVE: {
688 u8 array[2];
689 array[0] = 0xff;
690 array[1] = *val;
691 rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_KEEP_ALIVE_CTRL,
692 2, array);
693 break; }
694 default:
695 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
696 "switch case not process %x\n",
697 variable);
698 break;
699 }
700}
701
702static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
703{
704 struct rtl_priv *rtlpriv = rtl_priv(hw);
705 bool status = true;
706 int count = 0;
707 u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
708 _LLT_OP(_LLT_WRITE_ACCESS);
709
710 rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
711
712 do {
713 value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
714 if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
715 break;
716
717 if (count > POLLING_LLT_THRESHOLD) {
718 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
719 "Failed to polling write LLT done at "
720 "address %d!\n", address);
721 status = false;
722 break;
723 }
724 } while (++count);
725
726 return status;
727}
728
729static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
730{
731 struct rtl_priv *rtlpriv = rtl_priv(hw);
732 unsigned short i;
733 u8 txpktbuf_bndy;
734 u8 maxpage;
735 bool status;
736
737 maxpage = 255;
738 txpktbuf_bndy = 245;
739
740 rtl_write_dword(rtlpriv, REG_TRXFF_BNDY,
741 (0x27FF0000 | txpktbuf_bndy));
742 rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
743
744 rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
745 rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
746
747 rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
748 rtl_write_byte(rtlpriv, REG_PBP, 0x31);
749 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
750
751 for (i = 0; i < (txpktbuf_bndy - 1); i++) {
752 status = _rtl8723be_llt_write(hw, i, i + 1);
753 if (!status)
754 return status;
755 }
756 status = _rtl8723be_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
757
758 if (!status)
759 return status;
760
761 for (i = txpktbuf_bndy; i < maxpage; i++) {
762 status = _rtl8723be_llt_write(hw, i, (i + 1));
763 if (!status)
764 return status;
765 }
766 status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy);
767 if (!status)
768 return status;
769
770 rtl_write_dword(rtlpriv, REG_RQPN, 0x80e40808);
771 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
772
773 return true;
774}
775
776static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw)
777{
778 struct rtl_priv *rtlpriv = rtl_priv(hw);
779 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
780 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
781 struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
782
783 if (rtlpriv->rtlhal.up_first_time)
784 return;
785
786 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
787 rtl8723be_sw_led_on(hw, pled0);
788 else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
789 rtl8723be_sw_led_on(hw, pled0);
790 else
791 rtl8723be_sw_led_off(hw, pled0);
792}
793
/* Power on and initialize the MAC: run the HW power-on sequence,
 * (re)build the LLT page table when the MAC was not already enabled,
 * clear pending interrupt status, and program the TX/RX DMA registers
 * with the descriptor ring base addresses.
 * Returns false when the power-on sequence or the LLT init fails.
 */
static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	unsigned char bytetmp;
	unsigned short wordtmp;
	u16 retry = 0;
	bool mac_func_enable;

	/* 0x00 presumably unlocks the ISO/CLK/Power control registers
	 * (the poweroff path writes 0x0e to lock them) -- TODO confirm.
	 */
	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);

	/*Auto Power Down to CHIP-off State*/
	bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
	rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);

	/* NOTE(review): CR reading as all-ones is taken to mean the MAC
	 * function block is already enabled, so the LLT init below is
	 * skipped -- verify against the vendor reference driver.
	 */
	bytetmp = rtl_read_byte(rtlpriv, REG_CR);
	if (bytetmp == 0xFF)
		mac_func_enable = true;
	else
		mac_func_enable = false;

	/* HW Power on sequence */
	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
				      PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
				      RTL8723_NIC_ENABLE_FLOW)) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "init MAC Fail as power on failure\n");
		return false;
	}
	bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
	rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);

	/* CR is forced to 0xff (the read result is discarded). */
	bytetmp = rtl_read_byte(rtlpriv, REG_CR);
	bytetmp = 0xff;
	rtl_write_byte(rtlpriv, REG_CR, bytetmp);
	mdelay(2);

	bytetmp = rtl_read_byte(rtlpriv, REG_HWSEQ_CTRL);
	bytetmp |= 0x7f;
	rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
	mdelay(2);

	bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
	if (bytetmp & BIT(0)) {
		bytetmp = rtl_read_byte(rtlpriv, 0x7c);
		bytetmp |= BIT(6);
		rtl_write_byte(rtlpriv, 0x7c, bytetmp);
	}
	bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR);
	bytetmp |= BIT(3);
	rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp);
	bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
	bytetmp &= ~BIT(4);
	rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);

	bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+3);
	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+3, bytetmp | 0x77);

	rtl_write_word(rtlpriv, REG_CR, 0x2ff);

	/* Only rebuild the LLT page table on a cold MAC. */
	if (!mac_func_enable) {
		if (!_rtl8723be_llt_table_init(hw))
			return false;
	}
	/* Clear any stale interrupt status before enabling anything. */
	rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
	rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);

	/* Enable FW Beamformer Interrupt */
	bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
	rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));

	wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
	wordtmp &= 0xf;
	wordtmp |= 0xF5B1;
	rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);

	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
	rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
	rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);

	rtl_write_byte(rtlpriv, 0x4d0, 0x0);

	/* Program the low 32 bits of each DMA ring base address. */
	rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_MGQ_DESA,
			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_VOQ_DESA,
			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_VIQ_DESA,
			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_BEQ_DESA,
			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_BKQ_DESA,
			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_HQ_DESA,
			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_RX_DESA,
			(u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
			DMA_BIT_MASK(32));

	bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 3);
	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, bytetmp | 0x77);

	/* Writing 0 presumably disables interrupt migration -- confirm. */
	rtl_write_dword(rtlpriv, REG_INT_MIG, 0);

	bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));

	rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);

	/* Wait (up to 200 polls) for APSD_CTRL BIT(7) to clear. */
	do {
		retry++;
		bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
	} while ((retry < 200) && (bytetmp & BIT(7)));

	_rtl8723be_gen_refresh_led_state(hw);

	rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);

	bytetmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
	rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & ~BIT(2));

	return true;
}
923
924static void _rtl8723be_hw_configure(struct ieee80211_hw *hw)
925{
926 struct rtl_priv *rtlpriv = rtl_priv(hw);
927 u8 reg_bw_opmode;
928 u32 reg_ratr, reg_prsr;
929
930 reg_bw_opmode = BW_OPMODE_20MHZ;
931 reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
932 RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
933 reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
934
935 rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
936 rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
937}
938
939static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
940{
941 struct rtl_priv *rtlpriv = rtl_priv(hw);
942 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
943
944 rtl_write_byte(rtlpriv, 0x34b, 0x93);
945 rtl_write_word(rtlpriv, 0x350, 0x870c);
946 rtl_write_byte(rtlpriv, 0x352, 0x1);
947
948 if (ppsc->support_backdoor)
949 rtl_write_byte(rtlpriv, 0x349, 0x1b);
950 else
951 rtl_write_byte(rtlpriv, 0x349, 0x03);
952
953 rtl_write_word(rtlpriv, 0x350, 0x2718);
954 rtl_write_byte(rtlpriv, 0x352, 0x1);
955}
956
/* Configure hardware encryption/decryption.
 *
 * Builds the SECR register value (TX encrypt + RX decrypt, plus
 * default-key and broadcast-key usage bits) and hands it to the
 * HW_VAR_WPA_CONFIG handler.  Does nothing when software crypto is
 * requested by module parameter or by the security state.
 */
void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 sec_reg_value;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
		 rtlpriv->sec.pairwise_enc_algorithm,
		 rtlpriv->sec.group_enc_algorithm);

	/* Software crypto requested: leave hardware encryption off. */
	if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
			 "not open hw encryption\n");
		return;
	}
	sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;

	if (rtlpriv->sec.use_defaultkey) {
		sec_reg_value |= SCR_TXUSEDK;
		sec_reg_value |= SCR_RXUSEDK;
	}
	sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);

	rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);

	RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n",
		 sec_reg_value);

	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
987
/* Full hardware bring-up: MAC init, firmware download, MAC/BB/RF
 * configuration, security setup and initial calibration.
 * Returns 0 on success, 1 on MAC-init or firmware-download failure.
 */
int rtl8723be_hw_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	bool rtstatus = true;
	int err;
	u8 tmp_u1b;
	unsigned long flags;

	/* reenable interrupts to not interfere with other devices */
	/* NOTE(review): interrupts are force-enabled here and the saved
	 * flags are restored at "exit"; this assumes the caller runs
	 * with interrupts disabled -- confirm against the probe path.
	 */
	local_save_flags(flags);
	local_irq_enable();

	rtlpriv->rtlhal.being_init_adapter = true;
	rtlpriv->intf_ops->disable_aspm(hw);
	rtstatus = _rtl8723be_init_mac(hw);
	if (!rtstatus) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
		err = 1;
		goto exit;
	}
	tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
	tmp_u1b &= 0x7F;
	rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);

	err = rtl8723_download_fw(hw, true);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Failed to download FW. Init HW without FW now..\n");
		err = 1;
		rtlhal->fw_ready = false;
		goto exit;
	} else {
		rtlhal->fw_ready = true;
	}
	rtlhal->last_hmeboxnum = 0;
	rtl8723be_phy_mac_config(hw);
	/* because last function modify RCR, so we update
	 * rcr var here, or TP will unstable for receive_config
	 * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx
	 * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252
	 */
	rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
	rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);

	rtl8723be_phy_bb_config(hw);
	rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
	rtl8723be_phy_rf_config(hw);

	/* Cache the RF channel/bandwidth register for both paths. */
	rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
						 RF_CHNLBW, RFREG_OFFSET_MASK);
	rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
						 RF_CHNLBW, RFREG_OFFSET_MASK);
	rtlphy->rfreg_chnlval[0] &= 0xFFF03FF;
	rtlphy->rfreg_chnlval[0] |= (BIT(10) | BIT(11));

	rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
	rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
	_rtl8723be_hw_configure(hw);
	rtl_cam_reset_all_entry(hw);
	rtl8723be_enable_hw_security_config(hw);

	ppsc->rfpwr_state = ERFON;

	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
	_rtl8723be_enable_aspm_back_door(hw);
	rtlpriv->intf_ops->enable_aspm(hw);

	rtl8723be_bt_hw_init(hw);

	/* Misc BB register tweaks -- magic values, presumably from the
	 * vendor reference driver (TODO: document each register).
	 */
	rtl_set_bbreg(hw, 0x64, BIT(20), 0);
	rtl_set_bbreg(hw, 0x64, BIT(24), 0);

	rtl_set_bbreg(hw, 0x40, BIT(4), 0);
	rtl_set_bbreg(hw, 0x40, BIT(3), 1);

	rtl_set_bbreg(hw, 0x944, BIT(0)|BIT(1), 0x3);
	rtl_set_bbreg(hw, 0x930, 0xff, 0x77);

	rtl_set_bbreg(hw, 0x38, BIT(11), 0x1);

	rtl_set_bbreg(hw, 0xb2c, 0xffffffff, 0x80000000);

	if (ppsc->rfpwr_state == ERFON) {
		rtl8723be_dm_check_txpower_tracking(hw);
		rtl8723be_phy_lc_calibrate(hw);
	}
	/* Efuse 0x1FA bits trigger PA-bias / low-voltage workarounds
	 * (see the traces below).
	 */
	tmp_u1b = efuse_read_1byte(hw, 0x1FA);
	if (!(tmp_u1b & BIT(0))) {
		rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
	}
	if (!(tmp_u1b & BIT(4))) {
		tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
		tmp_u1b &= 0x0F;
		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
		udelay(10);
		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
	}
	rtl8723be_dm_init(hw);
exit:
	local_irq_restore(flags);
	rtlpriv->rtlhal.being_init_adapter = false;
	return err;
}
1100
1101static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
1102{
1103 struct rtl_priv *rtlpriv = rtl_priv(hw);
1104 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1105 enum version_8723e version = VERSION_UNKNOWN;
1106 u8 count = 0;
1107 u8 value8;
1108 u32 value32;
1109
1110 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0);
1111
1112 value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 2);
1113 rtl_write_byte(rtlpriv, REG_APS_FSMCO + 2, value8 | BIT(0));
1114
1115 value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
1116 rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, value8 | BIT(0));
1117
1118 value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
1119 while (((value8 & BIT(0))) && (count++ < 100)) {
1120 udelay(10);
1121 value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
1122 }
1123 count = 0;
1124 value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
1125 while ((value8 == 0) && (count++ < 50)) {
1126 value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
1127 mdelay(1);
1128 }
1129 value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
1130 if ((value32 & (CHIP_8723B)) != CHIP_8723B)
1131 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unkown chip version\n");
1132 else
1133 version = (enum version_8723e) VERSION_TEST_CHIP_1T1R_8723B;
1134
1135 rtlphy->rf_type = RF_1T1R;
1136
1137 value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
1138 if (value8 >= 0x02)
1139 version |= BIT(3);
1140 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1141 "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
1142 "RF_2T2R" : "RF_1T1R");
1143
1144 return version;
1145}
1146
/* Program the hardware network type (MSR) and the beacon TX functions
 * for the given interface type.  Returns 0 on success, 1 for an
 * unsupported type.
 */
static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
				       enum nl80211_iftype type)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	/* keep the upper MSR bits, clear the 2 network-type bits */
	u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
	enum led_ctl_mode ledaction = LED_CTL_NO_LINK;

	rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
	RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
		 "clear 0x550 when set HW_VAR_MEDIA_STATUS\n");

	/* Station/unspecified: stop sending beacons; ad-hoc/AP: resume. */
	if (type == NL80211_IFTYPE_UNSPECIFIED ||
	    type == NL80211_IFTYPE_STATION) {
		_rtl8723be_stop_tx_beacon(hw);
		_rtl8723be_enable_bcn_sub_func(hw);
	} else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
		_rtl8723be_resume_tx_beacon(hw);
		_rtl8723be_disable_bcn_sub_func(hw);
	} else {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Set HW_VAR_MEDIA_STATUS: "
			 "No such media status(%x).\n", type);
	}
	switch (type) {
	case NL80211_IFTYPE_UNSPECIFIED:
		bt_msr |= MSR_NOLINK;
		ledaction = LED_CTL_LINK;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "Set Network type to NO LINK!\n");
		break;
	case NL80211_IFTYPE_ADHOC:
		bt_msr |= MSR_ADHOC;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "Set Network type to Ad Hoc!\n");
		break;
	case NL80211_IFTYPE_STATION:
		bt_msr |= MSR_INFRA;
		ledaction = LED_CTL_LINK;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "Set Network type to STA!\n");
		break;
	case NL80211_IFTYPE_AP:
		bt_msr |= MSR_AP;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "Set Network type to AP!\n");
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Network type %d not support!\n", type);
		return 1;
	}
	rtl_write_byte(rtlpriv, (MSR), bt_msr);
	rtlpriv->cfg->ops->led_control(hw, ledaction);
	/* AP mode uses a different beacon TCFG value. */
	if ((bt_msr & 0x03) == MSR_AP)
		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
	else
		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
	return 0;
}
1206
1207void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1211 u32 reg_rcr = rtlpci->receive_config;
1212
1213 if (rtlpriv->psc.rfpwr_state != ERFON)
1214 return;
1215
1216 if (check_bssid) {
1217 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1218 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1219 (u8 *)(&reg_rcr));
1220 _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
1221 } else if (!check_bssid) {
1222 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1223 _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
1224 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1225 (u8 *)(&reg_rcr));
1226 }
1227}
1228
1229int rtl8723be_set_network_type(struct ieee80211_hw *hw,
1230 enum nl80211_iftype type)
1231{
1232 struct rtl_priv *rtlpriv = rtl_priv(hw);
1233
1234 if (_rtl8723be_set_media_status(hw, type))
1235 return -EOPNOTSUPP;
1236
1237 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1238 if (type != NL80211_IFTYPE_AP)
1239 rtl8723be_set_check_bssid(hw, true);
1240 } else {
1241 rtl8723be_set_check_bssid(hw, false);
1242 }
1243 return 0;
1244}
1245
1246/* don't set REG_EDCA_BE_PARAM here
1247 * because mac80211 will send pkt when scan
1248 */
1249void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
1250{
1251 struct rtl_priv *rtlpriv = rtl_priv(hw);
1252 rtl8723_dm_init_edca_turbo(hw);
1253 switch (aci) {
1254 case AC1_BK:
1255 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
1256 break;
1257 case AC0_BE:
1258 break;
1259 case AC2_VI:
1260 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
1261 break;
1262 case AC3_VO:
1263 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
1264 break;
1265 default:
1266 RT_ASSERT(false, "invalid aci: %d !\n", aci);
1267 break;
1268 }
1269}
1270
/* Unmask the configured interrupt sources (HIMR/HIMRE) and the system
 * interrupt mask (HSIMR), marking the irq path as enabled.
 */
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
	rtlpci->irq_enabled = true;
	/* there are some C2H CMDs have been sent
	 * before system interrupt is enabled, e.g., C2H, CPWM.
	 * So we need to clear all C2H events that FW has notified,
	 * otherwise FW won't schedule any commands anymore.
	 */
	rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
	/*enable system interrupt*/
	rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
}
1288
/* Mask all interrupts and wait for any in-flight handler to finish. */
void rtl8723be_disable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
	rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
	rtlpci->irq_enabled = false;
	/* Ensure a concurrently running ISR has exited before return. */
	synchronize_irq(rtlpci->pdev->irq);
}
1299
/* Power the adapter down: run the LPS-enter and card-disable power
 * sequences, reset the on-chip MCU, and lock the power control
 * registers again.
 */
static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 u1b_tmp;

	/* Combo (PCIe + USB) Card and PCIe-MF Card */
	/* 1. Run LPS WL RFOFF flow */
	rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
				 PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW);

	/* 2. 0x1F[7:0] = 0 */
	/* turn off RF */
	rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
	/* Only self-reset the firmware when it was actually loaded and
	 * signals ready (MCUFWDL BIT(7)).
	 */
	if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
	    rtlhal->fw_ready)
		rtl8723be_firmware_selfreset(hw);

	/* Reset MCU. Suggested by Filen. */
	u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));

	/* g. MCUFWDL 0x80[1:0]= 0 */
	/* reset MCU ready status */
	rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);

	/* HW card disable configuration. */
	rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
				 PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW);

	/* Reset MCU IO Wrapper */
	u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
	u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));

	/* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
	/* lock ISO/CLK/Power control register */
	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
}
1340
/* Take the card fully down: mark no-link, switch the HW to the
 * "unspecified" network type, turn the LED off when appropriate, and
 * run the power-off sequence.  IQK state is invalidated so calibration
 * runs again on the next power-up.
 */
void rtl8723be_card_disable(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	enum nl80211_iftype opmode;

	mac->link_state = MAC80211_NOLINK;
	opmode = NL80211_IFTYPE_UNSPECIFIED;
	_rtl8723be_set_media_status(hw, opmode);
	if (rtlpriv->rtlhal.driver_is_goingto_unload ||
	    ppsc->rfoff_reason > RF_CHANGE_BY_PS)
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
	RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
	_rtl8723be_poweroff_adapter(hw);

	/* after power off we should do iqk again */
	rtlpriv->phy.iqk_initialized = false;
}
1360
/* Fetch the pending interrupt causes into *p_inta (HISR) and *p_intb
 * (HISRE), masked by the driver's irq masks, and acknowledge them by
 * writing the read bits back.  NOTE(review): the read-then-write-back
 * usage implies write-1-to-clear semantics -- confirm against the
 * datasheet.
 */
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
				    u32 *p_inta, u32 *p_intb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
	rtl_write_dword(rtlpriv, ISR, *p_inta);

	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
		  rtlpci->irq_mask[1];
	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
}
1374
/* Program beacon timing: ATIM window, beacon interval, beacon TCFG
 * and the CCK/OFDM TSF RX offsets.  Interrupts are disabled around
 * the update so the beacon logic does not fire mid-change.
 */
void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 bcn_interval, atim_window;

	bcn_interval = mac->beacon_interval;
	atim_window = 2; /*FIX MERGE */
	rtl8723be_disable_interrupt(hw);
	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
	rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
	rtl_write_byte(rtlpriv, 0x606, 0x30);
	rtl8723be_enable_interrupt(hw);
}
1392
/* Update only the beacon interval register from the cached mac value,
 * with interrupts masked during the write.
 */
void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 bcn_interval = mac->beacon_interval;

	RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
		 "beacon_interval:%d\n", bcn_interval);
	rtl8723be_disable_interrupt(hw);
	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
	rtl8723be_enable_interrupt(hw);
}
1405
/* Add and/or remove bits in irq_mask[0], then re-arm the interrupt
 * registers via a full disable/enable cycle.
 */
void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
				     u32 add_msr, u32 rm_msr)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
		 "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);

	if (add_msr)
		rtlpci->irq_mask[0] |= add_msr;
	if (rm_msr)
		rtlpci->irq_mask[0] &= (~rm_msr);
	rtl8723be_disable_interrupt(hw);
	rtl8723be_enable_interrupt(hw);
}
1422
/* Map a 2.4GHz channel number to its TX power calibration group:
 * group 0 for chnl < 3, group 1 for chnl < 9, group 2 otherwise.
 */
static u8 _rtl8723be_get_chnl_group(u8 chnl)
{
	if (chnl < 3)
		return 0;
	return (chnl < 9) ? 1 : 2;
}
1435
/* Parse per-path TX power calibration data from the efuse image in
 * hwinfo[] into the 2.4G and 5G power-info structures.
 *
 * Power "diff" fields are 4-bit signed values packed two per byte
 * (high nibble first); BIT(3) is the sign bit, extended to 8 bits by
 * OR-ing 0xF0.  0xFF in the efuse means "not programmed" and is
 * replaced by defaults (0x2D base index, 0xFE for diffs).  If the
 * first power byte is 0xFF the whole image is treated as an autoload
 * failure and defaults are used throughout.
 */
static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
						 struct txpower_info_2g *pw2g,
						 struct txpower_info_5g *pw5g,
						 bool autoload_fail, u8 *hwinfo)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "hal_ReadPowerValueFromPROM8723BE(): "
		 "PROMContent[0x%x]= 0x%x\n",
		 (addr + 1), hwinfo[addr + 1]);
	if (0xFF == hwinfo[addr + 1]) /*YJ, add, 120316*/
		autoload_fail = true;

	if (autoload_fail) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "auto load fail : Use Default value!\n");
		for (path = 0; path < MAX_RF_PATH; path++) {
			/* 2.4G default value */
			for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
				pw2g->index_cck_base[path][group] = 0x2D;
				pw2g->index_bw40_base[path][group] = 0x2D;
			}
			for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
				if (cnt == 0) {
					pw2g->bw20_diff[path][0] = 0x02;
					pw2g->ofdm_diff[path][0] = 0x04;
				} else {
					pw2g->bw20_diff[path][cnt] = 0xFE;
					pw2g->bw40_diff[path][cnt] = 0xFE;
					pw2g->cck_diff[path][cnt] = 0xFE;
					pw2g->ofdm_diff[path][cnt] = 0xFE;
				}
			}
		}
		return;
	}
	for (path = 0; path < MAX_RF_PATH; path++) {
		/*2.4G default value*/
		/* Base CCK index per channel group. */
		for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
			pw2g->index_cck_base[path][group] = hwinfo[addr++];
			if (pw2g->index_cck_base[path][group] == 0xFF)
				pw2g->index_cck_base[path][group] = 0x2D;
		}
		/* Base BW40 index per group (one fewer group than CCK). */
		for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
			pw2g->index_bw40_base[path][group] = hwinfo[addr++];
			if (pw2g->index_bw40_base[path][group] == 0xFF)
				pw2g->index_bw40_base[path][group] = 0x2D;
		}
		/* Per-TX-count diffs; cnt==0 uses a one-byte layout,
		 * later counts two bytes (bw40/bw20 then ofdm/cck).
		 */
		for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
			if (cnt == 0) {
				pw2g->bw40_diff[path][cnt] = 0;
				if (hwinfo[addr] == 0xFF) {
					pw2g->bw20_diff[path][cnt] = 0x02;
				} else {
					pw2g->bw20_diff[path][cnt] =
						(hwinfo[addr] & 0xf0) >> 4;
					/*bit sign number to 8 bit sign number*/
					if (pw2g->bw20_diff[path][cnt] & BIT(3))
						pw2g->bw20_diff[path][cnt] |= 0xF0;
				}
				if (hwinfo[addr] == 0xFF) {
					pw2g->ofdm_diff[path][cnt] = 0x04;
				} else {
					pw2g->ofdm_diff[path][cnt] =
						(hwinfo[addr] & 0x0f);
					/*bit sign number to 8 bit sign number*/
					if (pw2g->ofdm_diff[path][cnt] & BIT(3))
						pw2g->ofdm_diff[path][cnt] |=
									  0xF0;
				}
				pw2g->cck_diff[path][cnt] = 0;
				addr++;
			} else {
				if (hwinfo[addr] == 0xFF) {
					pw2g->bw40_diff[path][cnt] = 0xFE;
				} else {
					pw2g->bw40_diff[path][cnt] =
						(hwinfo[addr] & 0xf0) >> 4;
					if (pw2g->bw40_diff[path][cnt] & BIT(3))
						pw2g->bw40_diff[path][cnt] |=
									  0xF0;
				}
				if (hwinfo[addr] == 0xFF) {
					pw2g->bw20_diff[path][cnt] = 0xFE;
				} else {
					pw2g->bw20_diff[path][cnt] =
						(hwinfo[addr] & 0x0f);
					if (pw2g->bw20_diff[path][cnt] & BIT(3))
						pw2g->bw20_diff[path][cnt] |=
									  0xF0;
				}
				addr++;

				if (hwinfo[addr] == 0xFF) {
					pw2g->ofdm_diff[path][cnt] = 0xFE;
				} else {
					pw2g->ofdm_diff[path][cnt] =
						(hwinfo[addr] & 0xf0) >> 4;
					if (pw2g->ofdm_diff[path][cnt] & BIT(3))
						pw2g->ofdm_diff[path][cnt] |=
									  0xF0;
				}
				if (hwinfo[addr] == 0xFF) {
					pw2g->cck_diff[path][cnt] = 0xFE;
				} else {
					pw2g->cck_diff[path][cnt] =
						(hwinfo[addr] & 0x0f);
					if (pw2g->cck_diff[path][cnt] & BIT(3))
						pw2g->cck_diff[path][cnt] |=
									 0xF0;
				}
				addr++;
			}
		}
		/*5G default value*/
		for (group = 0; group < MAX_CHNL_GROUP_5G; group++) {
			pw5g->index_bw40_base[path][group] = hwinfo[addr++];
			if (pw5g->index_bw40_base[path][group] == 0xFF)
				pw5g->index_bw40_base[path][group] = 0xFE;
		}
		for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
			if (cnt == 0) {
				pw5g->bw40_diff[path][cnt] = 0;

				if (hwinfo[addr] == 0xFF) {
					pw5g->bw20_diff[path][cnt] = 0;
				} else {
					/* cnt is 0 here, so [0] == [cnt] */
					pw5g->bw20_diff[path][0] =
						(hwinfo[addr] & 0xf0) >> 4;
					if (pw5g->bw20_diff[path][cnt] & BIT(3))
						pw5g->bw20_diff[path][cnt] |=
									  0xF0;
				}
				if (hwinfo[addr] == 0xFF) {
					pw5g->ofdm_diff[path][cnt] = 0x04;
				} else {
					/* cnt is 0 here, so [0] == [cnt] */
					pw5g->ofdm_diff[path][0] =
						(hwinfo[addr] & 0x0f);
					if (pw5g->ofdm_diff[path][cnt] & BIT(3))
						pw5g->ofdm_diff[path][cnt] |=
									  0xF0;
				}
				addr++;
			} else {
				if (hwinfo[addr] == 0xFF) {
					pw5g->bw40_diff[path][cnt] = 0xFE;
				} else {
					pw5g->bw40_diff[path][cnt] =
						(hwinfo[addr] & 0xf0) >> 4;
					if (pw5g->bw40_diff[path][cnt] & BIT(3))
						pw5g->bw40_diff[path][cnt] |= 0xF0;
				}
				if (hwinfo[addr] == 0xFF) {
					pw5g->bw20_diff[path][cnt] = 0xFE;
				} else {
					pw5g->bw20_diff[path][cnt] =
						(hwinfo[addr] & 0x0f);
					if (pw5g->bw20_diff[path][cnt] & BIT(3))
						pw5g->bw20_diff[path][cnt] |= 0xF0;
				}
				addr++;
			}
		}
		/* 5G OFDM diffs for TX counts 2-4 are packed into the
		 * next two bytes.
		 */
		if (hwinfo[addr] == 0xFF) {
			pw5g->ofdm_diff[path][1] = 0xFE;
			pw5g->ofdm_diff[path][2] = 0xFE;
		} else {
			pw5g->ofdm_diff[path][1] = (hwinfo[addr] & 0xf0) >> 4;
			pw5g->ofdm_diff[path][2] = (hwinfo[addr] & 0x0f);
		}
		addr++;

		if (hwinfo[addr] == 0xFF)
			pw5g->ofdm_diff[path][3] = 0xFE;
		else
			pw5g->ofdm_diff[path][3] = (hwinfo[addr] & 0x0f);
		addr++;

		/* Normalize/sign-extend the deferred 5G OFDM diffs. */
		for (cnt = 1; cnt < MAX_TX_COUNT; cnt++) {
			if (pw5g->ofdm_diff[path][cnt] == 0xFF)
				pw5g->ofdm_diff[path][cnt] = 0xFE;
			else if (pw5g->ofdm_diff[path][cnt] & BIT(3))
				pw5g->ofdm_diff[path][cnt] |= 0xF0;
		}
	}
}
1624
/* Translate the raw efuse/EEPROM image into the per-path, per-channel TX
 * power tables stored in rtl_efuse, then parse the thermal-meter and
 * regulatory bytes.  @autoload_fail selects the default-value path inside
 * the prom parser; @hwinfo is the efuse shadow map.
 */
static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
						   bool autoload_fail,
						   u8 *hwinfo)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct txpower_info_2g pw2g;
	struct txpower_info_5g pw5g;
	u8 rf_path, index;
	u8 i;

	/* Decode the raw map into 2.4G/5G base-index and diff tables. */
	_rtl8723be_read_power_value_fromprom(hw, &pw2g, &pw5g, autoload_fail,
					     hwinfo);

	for (rf_path = 0; rf_path < 2; rf_path++) {
		/* Per-channel base power: channels 1..14 are collapsed into
		 * channel groups by _rtl8723be_get_chnl_group().
		 */
		for (i = 0; i < 14; i++) {
			index = _rtl8723be_get_chnl_group(i+1);

			rtlefuse->txpwrlevel_cck[rf_path][i] =
					pw2g.index_cck_base[rf_path][index];
			rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
					pw2g.index_bw40_base[rf_path][index];
		}
		/* Per-rate-section power diffs relative to the base index. */
		for (i = 0; i < MAX_TX_COUNT; i++) {
			rtlefuse->txpwr_ht20diff[rf_path][i] =
						pw2g.bw20_diff[rf_path][i];
			rtlefuse->txpwr_ht40diff[rf_path][i] =
						pw2g.bw40_diff[rf_path][i];
			rtlefuse->txpwr_legacyhtdiff[rf_path][i] =
						pw2g.ofdm_diff[rf_path][i];
		}
		for (i = 0; i < 14; i++) {
			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
				"RF(%d)-Ch(%d) [CCK / HT40_1S ] = "
				"[0x%x / 0x%x ]\n", rf_path, i,
				rtlefuse->txpwrlevel_cck[rf_path][i],
				rtlefuse->txpwrlevel_ht40_1s[rf_path][i]);
		}
	}
	/* Thermal meter byte: 0xff means unprogrammed -> use the default and
	 * tell the APK code to ignore it.
	 * NOTE(review): the offset macro is the 88E one — confirm it matches
	 * the 8723BE efuse map.
	 */
	if (!autoload_fail)
		rtlefuse->eeprom_thermalmeter =
					hwinfo[EEPROM_THERMAL_METER_88E];
	else
		rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;

	if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) {
		rtlefuse->apk_thermalmeterignore = true;
		rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
	}
	rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
	RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
		"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);

	/* Regulatory setting lives in bits 0-2 of the RF board option byte;
	 * an unprogrammed byte (0xFF) means "no restriction" (0).
	 */
	if (!autoload_fail) {
		rtlefuse->eeprom_regulatory =
			hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/
		if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
			rtlefuse->eeprom_regulatory = 0;
	} else {
		rtlefuse->eeprom_regulatory = 0;
	}
	RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
		"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
}
1689
1690static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
1691 bool pseudo_test)
1692{
1693 struct rtl_priv *rtlpriv = rtl_priv(hw);
1694 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1695 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1696 u16 i, usvalue;
1697 u8 hwinfo[HWSET_MAX_SIZE];
1698 u16 eeprom_id;
1699 bool is_toshiba_smid1 = false;
1700 bool is_toshiba_smid2 = false;
1701 bool is_samsung_smid = false;
1702 bool is_lenovo_smid = false;
1703 u16 toshiba_smid1[] = {
1704 0x6151, 0x6152, 0x6154, 0x6155, 0x6177, 0x6178, 0x6179, 0x6180,
1705 0x7151, 0x7152, 0x7154, 0x7155, 0x7177, 0x7178, 0x7179, 0x7180,
1706 0x8151, 0x8152, 0x8154, 0x8155, 0x8181, 0x8182, 0x8184, 0x8185,
1707 0x9151, 0x9152, 0x9154, 0x9155, 0x9181, 0x9182, 0x9184, 0x9185
1708 };
1709 u16 toshiba_smid2[] = {
1710 0x6181, 0x6184, 0x6185, 0x7181, 0x7182, 0x7184, 0x7185, 0x8181,
1711 0x8182, 0x8184, 0x8185, 0x9181, 0x9182, 0x9184, 0x9185
1712 };
1713 u16 samsung_smid[] = {
1714 0x6191, 0x6192, 0x6193, 0x7191, 0x7192, 0x7193, 0x8191, 0x8192,
1715 0x8193, 0x9191, 0x9192, 0x9193
1716 };
1717 u16 lenovo_smid[] = {
1718 0x8195, 0x9195, 0x7194, 0x8200, 0x8201, 0x8202, 0x9199, 0x9200
1719 };
1720
1721 if (pseudo_test) {
1722 /* needs to be added */
1723 return;
1724 }
1725 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
1726 rtl_efuse_shadow_map_update(hw);
1727
1728 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1729 HWSET_MAX_SIZE);
1730 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1731 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1732 "RTL819X Not boot from eeprom, check it !!");
1733 }
1734 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
1735 hwinfo, HWSET_MAX_SIZE);
1736
1737 eeprom_id = *((u16 *)&hwinfo[0]);
1738 if (eeprom_id != RTL8723BE_EEPROM_ID) {
1739 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1740 "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
1741 rtlefuse->autoload_failflag = true;
1742 } else {
1743 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
1744 rtlefuse->autoload_failflag = false;
1745 }
1746 if (rtlefuse->autoload_failflag)
1747 return;
1748
1749 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
1750 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
1751 rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
1752 rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
1753 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1754 "EEPROMId = 0x%4x\n", eeprom_id);
1755 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1756 "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
1757 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1758 "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
1759 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1760 "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
1761 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1762 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
1763
1764 for (i = 0; i < 6; i += 2) {
1765 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
1766 *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
1767 }
1768 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n",
1769 rtlefuse->dev_addr);
1770
1771 /*parse xtal*/
1772 rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE];
1773 if (rtlefuse->crystalcap == 0xFF)
1774 rtlefuse->crystalcap = 0x20;
1775
1776 _rtl8723be_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
1777 hwinfo);
1778
1779 rtl8723be_read_bt_coexist_info_from_hwpg(hw,
1780 rtlefuse->autoload_failflag,
1781 hwinfo);
1782
1783 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
1784 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1785 rtlefuse->txpwr_fromeprom = true;
1786 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
1787
1788 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1789 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
1790
1791 /* set channel plan to world wide 13 */
1792 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
1793
1794 if (rtlhal->oem_id == RT_CID_DEFAULT) {
1795 /* Does this one have a Toshiba SMID from group 1? */
1796 for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) {
1797 if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
1798 is_toshiba_smid1 = true;
1799 break;
1800 }
1801 }
1802 /* Does this one have a Toshiba SMID from group 2? */
1803 for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) {
1804 if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
1805 is_toshiba_smid2 = true;
1806 break;
1807 }
1808 }
1809 /* Does this one have a Samsung SMID? */
1810 for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) {
1811 if (rtlefuse->eeprom_smid == samsung_smid[i]) {
1812 is_samsung_smid = true;
1813 break;
1814 }
1815 }
1816 /* Does this one have a Lenovo SMID? */
1817 for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) {
1818 if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
1819 is_lenovo_smid = true;
1820 break;
1821 }
1822 }
1823 switch (rtlefuse->eeprom_oemid) {
1824 case EEPROM_CID_DEFAULT:
1825 if (rtlefuse->eeprom_did == 0x8176) {
1826 if (rtlefuse->eeprom_svid == 0x10EC &&
1827 is_toshiba_smid1) {
1828 rtlhal->oem_id = RT_CID_TOSHIBA;
1829 } else if (rtlefuse->eeprom_svid == 0x1025) {
1830 rtlhal->oem_id = RT_CID_819X_ACER;
1831 } else if (rtlefuse->eeprom_svid == 0x10EC &&
1832 is_samsung_smid) {
1833 rtlhal->oem_id = RT_CID_819X_SAMSUNG;
1834 } else if (rtlefuse->eeprom_svid == 0x10EC &&
1835 is_lenovo_smid) {
1836 rtlhal->oem_id = RT_CID_819X_LENOVO;
1837 } else if ((rtlefuse->eeprom_svid == 0x10EC &&
1838 rtlefuse->eeprom_smid == 0x8197) ||
1839 (rtlefuse->eeprom_svid == 0x10EC &&
1840 rtlefuse->eeprom_smid == 0x9196)) {
1841 rtlhal->oem_id = RT_CID_819X_CLEVO;
1842 } else if ((rtlefuse->eeprom_svid == 0x1028 &&
1843 rtlefuse->eeprom_smid == 0x8194) ||
1844 (rtlefuse->eeprom_svid == 0x1028 &&
1845 rtlefuse->eeprom_smid == 0x8198) ||
1846 (rtlefuse->eeprom_svid == 0x1028 &&
1847 rtlefuse->eeprom_smid == 0x9197) ||
1848 (rtlefuse->eeprom_svid == 0x1028 &&
1849 rtlefuse->eeprom_smid == 0x9198)) {
1850 rtlhal->oem_id = RT_CID_819X_DELL;
1851 } else if ((rtlefuse->eeprom_svid == 0x103C &&
1852 rtlefuse->eeprom_smid == 0x1629)) {
1853 rtlhal->oem_id = RT_CID_819X_HP;
1854 } else if ((rtlefuse->eeprom_svid == 0x1A32 &&
1855 rtlefuse->eeprom_smid == 0x2315)) {
1856 rtlhal->oem_id = RT_CID_819X_QMI;
1857 } else if ((rtlefuse->eeprom_svid == 0x10EC &&
1858 rtlefuse->eeprom_smid == 0x8203)) {
1859 rtlhal->oem_id = RT_CID_819X_PRONETS;
1860 } else if ((rtlefuse->eeprom_svid == 0x1043 &&
1861 rtlefuse->eeprom_smid == 0x84B5)) {
1862 rtlhal->oem_id = RT_CID_819X_EDIMAX_ASUS;
1863 } else {
1864 rtlhal->oem_id = RT_CID_DEFAULT;
1865 }
1866 } else if (rtlefuse->eeprom_did == 0x8178) {
1867 if (rtlefuse->eeprom_svid == 0x10EC &&
1868 is_toshiba_smid2)
1869 rtlhal->oem_id = RT_CID_TOSHIBA;
1870 else if (rtlefuse->eeprom_svid == 0x1025)
1871 rtlhal->oem_id = RT_CID_819X_ACER;
1872 else if ((rtlefuse->eeprom_svid == 0x10EC &&
1873 rtlefuse->eeprom_smid == 0x8186))
1874 rtlhal->oem_id = RT_CID_819X_PRONETS;
1875 else if ((rtlefuse->eeprom_svid == 0x1043 &&
1876 rtlefuse->eeprom_smid == 0x84B6))
1877 rtlhal->oem_id =
1878 RT_CID_819X_EDIMAX_ASUS;
1879 else
1880 rtlhal->oem_id = RT_CID_DEFAULT;
1881 } else {
1882 rtlhal->oem_id = RT_CID_DEFAULT;
1883 }
1884 break;
1885 case EEPROM_CID_TOSHIBA:
1886 rtlhal->oem_id = RT_CID_TOSHIBA;
1887 break;
1888 case EEPROM_CID_CCX:
1889 rtlhal->oem_id = RT_CID_CCX;
1890 break;
1891 case EEPROM_CID_QMI:
1892 rtlhal->oem_id = RT_CID_819X_QMI;
1893 break;
1894 case EEPROM_CID_WHQL:
1895 break;
1896 default:
1897 rtlhal->oem_id = RT_CID_DEFAULT;
1898 break;
1899 }
1900 }
1901}
1902
1903static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
1904{
1905 struct rtl_priv *rtlpriv = rtl_priv(hw);
1906 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1907 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1908
1909 pcipriv->ledctl.led_opendrain = true;
1910 switch (rtlhal->oem_id) {
1911 case RT_CID_819X_HP:
1912 pcipriv->ledctl.led_opendrain = true;
1913 break;
1914 case RT_CID_819X_LENOVO:
1915 case RT_CID_DEFAULT:
1916 case RT_CID_TOSHIBA:
1917 case RT_CID_CCX:
1918 case RT_CID_819X_ACER:
1919 case RT_CID_WHQL:
1920 default:
1921 break;
1922 }
1923 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1924 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
1925}
1926
/* Read chip version and boot-source registers, then parse the adapter's
 * efuse/EEPROM image and apply OEM customizations.
 */
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 tmp_u1b;

	rtlhal->version = _rtl8723be_read_chip_version(hw);
	/* Enable RX on path B only for multi-path RF configurations. */
	if (get_rf_type(rtlphy) == RF_1T1R)
		rtlpriv->dm.rfpath_rxenable[0] = true;
	else
		rtlpriv->dm.rfpath_rxenable[0] =
		    rtlpriv->dm.rfpath_rxenable[1] = true;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
		 rtlhal->version);
	/* REG_9346CR bit4: boot source (EEPROM vs efuse);
	 * bit5: autoload succeeded.
	 */
	tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
	if (tmp_u1b & BIT(4)) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
		rtlefuse->epromtype = EEPROM_93C46;
	} else {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
		rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
	}
	if (tmp_u1b & BIT(5)) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
		rtlefuse->autoload_failflag = false;
		_rtl8723be_read_adapter_info(hw, false);
	} else {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
	}
	_rtl8723be_hal_customized_behavior(hw);
}
1960
/* Legacy (non-RA-mask) rate control: build a rate bitmap for @sta from its
 * supported legacy rates and HT MCS mask, restrict it by wireless mode,
 * RF type and BT-coexist state, and program it into REG_ARFR0.
 */
static void rtl8723be_update_hal_rate_table(struct ieee80211_hw *hw,
					    struct ieee80211_sta *sta)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u32 ratr_value;
	u8 ratr_index = 0;
	u8 nmode = mac->ht_enable;
	u8 mimo_ps = IEEE80211_SMPS_OFF;	/* fixed: SMPS not tracked here */
	u16 shortgi_rate;
	u32 tmp_ratr_value;
	u8 curtxbw_40mhz = mac->bw_40;
	u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
			       1 : 0;
	u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
			       1 : 0;
	enum wireless_mode wirelessmode = mac->mode;

	/* 5G legacy rates start at bit 4 (OFDM); 2.4G includes CCK bits. */
	if (rtlhal->current_bandtype == BAND_ON_5G)
		ratr_value = sta->supp_rates[1] << 4;
	else
		ratr_value = sta->supp_rates[0];
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		ratr_value = 0xfff;
	/* MCS0-7 at bits 12-19, MCS8-15 at bits 20-27. */
	ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
		       sta->ht_cap.mcs.rx_mask[0] << 12);
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		/* Keep only CCK rates; drop 1M/2M if 5.5M/11M available. */
		if (ratr_value & 0x0000000c)
			ratr_value &= 0x0000000d;
		else
			ratr_value &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_value &= 0x00000FF5;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		nmode = 1;
		if (mimo_ps == IEEE80211_SMPS_STATIC) {
			/* Static SMPS: single-stream MCS only. */
			ratr_value &= 0x0007F005;
		} else {
			u32 ratr_mask;

			/* Single RX chain limits us to MCS0-7. */
			if (get_rf_type(rtlphy) == RF_1T2R ||
			    get_rf_type(rtlphy) == RF_1T1R)
				ratr_mask = 0x000ff005;
			else
				ratr_mask = 0x0f0ff005;
			ratr_value &= ratr_mask;
		}
		break;
	default:
		if (rtlphy->rf_type == RF_1T2R)
			ratr_value &= 0x000ff0ff;
		else
			ratr_value &= 0x0f0ff0ff;
		break;
	}
	/* Narrow the rate set further while BT SCO/busy traffic is active
	 * on shared-antenna BC4 coexistence.
	 */
	if ((rtlpriv->btcoexist.bt_coexistence) &&
	    (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
	    (rtlpriv->btcoexist.bt_cur_state) &&
	    (rtlpriv->btcoexist.bt_ant_isolation) &&
	    ((rtlpriv->btcoexist.bt_service == BT_SCO) ||
	     (rtlpriv->btcoexist.bt_service == BT_BUSY)))
		ratr_value &= 0x0fffcfc0;
	else
		ratr_value &= 0x0FFFFFFF;

	/* Short GI capable: set bit 28 and find the highest enabled MCS.
	 * NOTE(review): the composed shortgi_rate value is never used after
	 * this block — looks like dead code inherited from older drivers;
	 * confirm before relying on it.
	 */
	if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
		      (!curtxbw_40mhz && curshortgi_20mhz))) {
		ratr_value |= 0x10000000;
		tmp_ratr_value = (ratr_value >> 12);

		for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
			if ((1 << shortgi_rate) & tmp_ratr_value)
				break;
		}
		shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
			       (shortgi_rate << 4) | (shortgi_rate);
	}
	rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);

	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
2049
2050static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw,
2051 u8 rate_index)
2052{
2053 u8 ret = 0;
2054
2055 switch (rate_index) {
2056 case RATR_INX_WIRELESS_NGB:
2057 ret = 1;
2058 break;
2059 case RATR_INX_WIRELESS_N:
2060 case RATR_INX_WIRELESS_NG:
2061 ret = 5;
2062 break;
2063 case RATR_INX_WIRELESS_NB:
2064 ret = 3;
2065 break;
2066 case RATR_INX_WIRELESS_GB:
2067 ret = 6;
2068 break;
2069 case RATR_INX_WIRELESS_G:
2070 ret = 7;
2071 break;
2072 case RATR_INX_WIRELESS_B:
2073 ret = 8;
2074 break;
2075 default:
2076 ret = 0;
2077 break;
2078 }
2079 return ret;
2080}
2081
2082static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
2083 struct ieee80211_sta *sta,
2084 u8 rssi_level)
2085{
2086 struct rtl_priv *rtlpriv = rtl_priv(hw);
2087 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2088 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2089 struct rtl_sta_info *sta_entry = NULL;
2090 u32 ratr_bitmap;
2091 u8 ratr_index;
2092 u8 curtxbw_40mhz = (sta->ht_cap.cap &
2093 IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
2094 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
2095 1 : 0;
2096 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
2097 1 : 0;
2098 enum wireless_mode wirelessmode = 0;
2099 bool shortgi = false;
2100 u8 rate_mask[7];
2101 u8 macid = 0;
2102 u8 mimo_ps = IEEE80211_SMPS_OFF;
2103
2104 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
2105 wirelessmode = sta_entry->wireless_mode;
2106 if (mac->opmode == NL80211_IFTYPE_STATION ||
2107 mac->opmode == NL80211_IFTYPE_MESH_POINT)
2108 curtxbw_40mhz = mac->bw_40;
2109 else if (mac->opmode == NL80211_IFTYPE_AP ||
2110 mac->opmode == NL80211_IFTYPE_ADHOC)
2111 macid = sta->aid + 1;
2112
2113 ratr_bitmap = sta->supp_rates[0];
2114
2115 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2116 ratr_bitmap = 0xfff;
2117
2118 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2119 sta->ht_cap.mcs.rx_mask[0] << 12);
2120 switch (wirelessmode) {
2121 case WIRELESS_MODE_B:
2122 ratr_index = RATR_INX_WIRELESS_B;
2123 if (ratr_bitmap & 0x0000000c)
2124 ratr_bitmap &= 0x0000000d;
2125 else
2126 ratr_bitmap &= 0x0000000f;
2127 break;
2128 case WIRELESS_MODE_G:
2129 ratr_index = RATR_INX_WIRELESS_GB;
2130
2131 if (rssi_level == 1)
2132 ratr_bitmap &= 0x00000f00;
2133 else if (rssi_level == 2)
2134 ratr_bitmap &= 0x00000ff0;
2135 else
2136 ratr_bitmap &= 0x00000ff5;
2137 break;
2138 case WIRELESS_MODE_A:
2139 ratr_index = RATR_INX_WIRELESS_A;
2140 ratr_bitmap &= 0x00000ff0;
2141 break;
2142 case WIRELESS_MODE_N_24G:
2143 case WIRELESS_MODE_N_5G:
2144 ratr_index = RATR_INX_WIRELESS_NGB;
2145
2146 if (mimo_ps == IEEE80211_SMPS_STATIC ||
2147 mimo_ps == IEEE80211_SMPS_DYNAMIC) {
2148 if (rssi_level == 1)
2149 ratr_bitmap &= 0x00070000;
2150 else if (rssi_level == 2)
2151 ratr_bitmap &= 0x0007f000;
2152 else
2153 ratr_bitmap &= 0x0007f005;
2154 } else {
2155 if (rtlphy->rf_type == RF_1T1R) {
2156 if (curtxbw_40mhz) {
2157 if (rssi_level == 1)
2158 ratr_bitmap &= 0x000f0000;
2159 else if (rssi_level == 2)
2160 ratr_bitmap &= 0x000ff000;
2161 else
2162 ratr_bitmap &= 0x000ff015;
2163 } else {
2164 if (rssi_level == 1)
2165 ratr_bitmap &= 0x000f0000;
2166 else if (rssi_level == 2)
2167 ratr_bitmap &= 0x000ff000;
2168 else
2169 ratr_bitmap &= 0x000ff005;
2170 }
2171 } else {
2172 if (curtxbw_40mhz) {
2173 if (rssi_level == 1)
2174 ratr_bitmap &= 0x0f8f0000;
2175 else if (rssi_level == 2)
2176 ratr_bitmap &= 0x0f8ff000;
2177 else
2178 ratr_bitmap &= 0x0f8ff015;
2179 } else {
2180 if (rssi_level == 1)
2181 ratr_bitmap &= 0x0f8f0000;
2182 else if (rssi_level == 2)
2183 ratr_bitmap &= 0x0f8ff000;
2184 else
2185 ratr_bitmap &= 0x0f8ff005;
2186 }
2187 }
2188 }
2189 if ((curtxbw_40mhz && curshortgi_40mhz) ||
2190 (!curtxbw_40mhz && curshortgi_20mhz)) {
2191 if (macid == 0)
2192 shortgi = true;
2193 else if (macid == 1)
2194 shortgi = false;
2195 }
2196 break;
2197 default:
2198 ratr_index = RATR_INX_WIRELESS_NGB;
2199
2200 if (rtlphy->rf_type == RF_1T2R)
2201 ratr_bitmap &= 0x000ff0ff;
2202 else
2203 ratr_bitmap &= 0x0f0ff0ff;
2204 break;
2205 }
2206 sta_entry->ratr_index = ratr_index;
2207
2208 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2209 "ratr_bitmap :%x\n", ratr_bitmap);
2210 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
2211 rate_mask[0] = macid;
2212 rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) |
2213 (shortgi ? 0x80 : 0x00);
2214 rate_mask[2] = curtxbw_40mhz;
2215 /* if (prox_priv->proxim_modeinfo->power_output > 0)
2216 * rate_mask[2] |= BIT(6);
2217 */
2218
2219 rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
2220 rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
2221 rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
2222 rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
2223
2224 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2225 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
2226 ratr_index, ratr_bitmap,
2227 rate_mask[0], rate_mask[1],
2228 rate_mask[2], rate_mask[3],
2229 rate_mask[4], rate_mask[5],
2230 rate_mask[6]);
2231 rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RA_MASK, 7, rate_mask);
2232 _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
2233}
2234
2235void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
2236 struct ieee80211_sta *sta,
2237 u8 rssi_level)
2238{
2239 struct rtl_priv *rtlpriv = rtl_priv(hw);
2240 if (rtlpriv->dm.useramask)
2241 rtl8723be_update_hal_rate_mask(hw, sta, rssi_level);
2242 else
2243 rtl8723be_update_hal_rate_table(hw, sta);
2244}
2245
2246void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw)
2247{
2248 struct rtl_priv *rtlpriv = rtl_priv(hw);
2249 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2250 u16 sifs_timer;
2251
2252 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
2253 if (!mac->ht_enable)
2254 sifs_timer = 0x0a0a;
2255 else
2256 sifs_timer = 0x0e0e;
2257 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2258}
2259
/* Poll the RF-kill GPIO and synchronize the software radio state with it.
 * Sets *valid to 1 when the check ran, and returns true when the radio is
 * (now) on.  Bails out early while the adapter is initializing or an RF
 * change is already in flight.
 */
bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
	u8 u1tmp;
	bool actuallyset = false;

	if (rtlpriv->rtlhal.being_init_adapter)
		return false;

	if (ppsc->swrf_processing)
		return false;

	/* Claim the rfchange_inprogress flag under the RF PS lock; only one
	 * context may toggle the radio at a time.
	 */
	spin_lock(&rtlpriv->locks.rf_ps_lock);
	if (ppsc->rfchange_inprogress) {
		spin_unlock(&rtlpriv->locks.rf_ps_lock);
		return false;
	} else {
		ppsc->rfchange_inprogress = true;
		spin_unlock(&rtlpriv->locks.rf_ps_lock);
	}
	cur_rfstate = ppsc->rfpwr_state;

	/* Configure the pin as input (clear bit 1) before sampling it. */
	rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
		       rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));

	u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);

	/* polarity_ctl inverts the meaning of the GPIO level. */
	if (rtlphy->polarity_ctl)
		e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
	else
		e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;

	if (ppsc->hwradiooff &&
	    (e_rfpowerstate_toset == ERFON)) {
		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
			 "GPIOChangeRF - HW Radio ON, RF ON\n");

		e_rfpowerstate_toset = ERFON;
		ppsc->hwradiooff = false;
		actuallyset = true;
	} else if (!ppsc->hwradiooff &&
		   (e_rfpowerstate_toset == ERFOFF)) {
		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
			 "GPIOChangeRF - HW Radio OFF, RF OFF\n");

		e_rfpowerstate_toset = ERFOFF;
		ppsc->hwradiooff = true;
		actuallyset = true;
	}
	/* Release the in-progress flag; when nothing changed, also record
	 * the HALT_NIC level so a later resume re-initializes the NIC.
	 */
	if (actuallyset) {
		spin_lock(&rtlpriv->locks.rf_ps_lock);
		ppsc->rfchange_inprogress = false;
		spin_unlock(&rtlpriv->locks.rf_ps_lock);
	} else {
		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

		spin_lock(&rtlpriv->locks.rf_ps_lock);
		ppsc->rfchange_inprogress = false;
		spin_unlock(&rtlpriv->locks.rf_ps_lock);
	}
	*valid = 1;
	return !ppsc->hwradiooff;
}
2327
/* Install, delete or clear hardware CAM security entries.
 * @clear_all wipes the first five CAM entries and the cached software
 * keys; otherwise the key at @key_index for @p_macaddr is added (when a
 * cached key of non-zero length exists) or removed.
 */
void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
		       u8 *p_macaddr, bool is_group, u8 enc_algo,
		       bool is_wepkey, bool clear_all)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *macaddr = p_macaddr;
	u32 entry_id = 0;
	bool is_pairwise = false;

	/* Placeholder addresses used for default-key CAM slots 0-3. */
	static u8 cam_const_addr[4][6] = {
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
	};
	static u8 cam_const_broad[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (clear_all) {
		u8 idx = 0;
		u8 cam_offset = 0;
		u8 clear_number = 5;

		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");

		for (idx = 0; idx < clear_number; idx++) {
			rtl_cam_mark_invalid(hw, cam_offset + idx);
			rtl_cam_empty_entry(hw, cam_offset + idx);

			if (idx < 5) {
				memset(rtlpriv->sec.key_buf[idx], 0,
				       MAX_KEY_LEN);
				rtlpriv->sec.key_len[idx] = 0;
			}
		}
	} else {
		/* Translate the driver cipher id to the CAM algorithm id. */
		switch (enc_algo) {
		case WEP40_ENCRYPTION:
			enc_algo = CAM_WEP40;
			break;
		case WEP104_ENCRYPTION:
			enc_algo = CAM_WEP104;
			break;
		case TKIP_ENCRYPTION:
			enc_algo = CAM_TKIP;
			break;
		case AESCCMP_ENCRYPTION:
			enc_algo = CAM_AES;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not process\n");
			enc_algo = CAM_TKIP;
			break;
		}

		/* Pick the CAM entry and the MAC address to file it under:
		 * WEP/default keys use fixed placeholder addresses, group
		 * keys use the broadcast address, pairwise keys use the
		 * peer address (AP mode allocates a free entry).
		 */
		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
			macaddr = cam_const_addr[key_index];
			entry_id = key_index;
		} else {
			if (is_group) {
				macaddr = cam_const_broad;
				entry_id = key_index;
			} else {
				if (mac->opmode == NL80211_IFTYPE_AP) {
					entry_id = rtl_cam_get_free_entry(hw,
								p_macaddr);
					if (entry_id >=  TOTAL_CAM_ENTRY) {
						RT_TRACE(rtlpriv, COMP_SEC,
							 DBG_EMERG,
							 "Can not find free"
							 " hw security cam "
							 "entry\n");
						return;
					}
				} else {
					entry_id = CAM_PAIRWISE_KEY_POSITION;
				}
				key_index = PAIRWISE_KEYIDX;
				is_pairwise = true;
			}
		}
		if (rtlpriv->sec.key_len[key_index] == 0) {
			/* Zero-length cached key: treat as deletion. */
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 "delete one entry, entry_id is %d\n",
				 entry_id);
			if (mac->opmode == NL80211_IFTYPE_AP)
				rtl_cam_del_entry(hw, p_macaddr);
			rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
		} else {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 "add one entry\n");
			/* NOTE(review): pairwise adds use
			 * key_buf[key_index] while group adds use
			 * key_buf[entry_id] — confirm this asymmetry is
			 * intentional for AP-mode entry ids.
			 */
			if (is_pairwise) {
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 "set Pairwise key\n");

				rtl_cam_add_one_entry(hw, macaddr, key_index,
					      entry_id, enc_algo,
					      CAM_CONFIG_NO_USEDK,
					      rtlpriv->sec.key_buf[key_index]);
			} else {
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 "set group key\n");

				if (mac->opmode == NL80211_IFTYPE_ADHOC) {
					rtl_cam_add_one_entry(hw,
						rtlefuse->dev_addr,
						PAIRWISE_KEYIDX,
						CAM_PAIRWISE_KEY_POSITION,
						enc_algo,
						CAM_CONFIG_NO_USEDK,
						rtlpriv->sec.key_buf
						[entry_id]);
				}
				rtl_cam_add_one_entry(hw, macaddr, key_index,
					      entry_id, enc_algo,
					      CAM_CONFIG_NO_USEDK,
					      rtlpriv->sec.key_buf[entry_id]);
			}
		}
	}
}
2453
2454void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2455 bool auto_load_fail, u8 *hwinfo)
2456{
2457 struct rtl_priv *rtlpriv = rtl_priv(hw);
2458 u8 value;
2459 u32 tmpu_32;
2460
2461 if (!auto_load_fail) {
2462 tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
2463 if (tmpu_32 & BIT(18))
2464 rtlpriv->btcoexist.btc_info.btcoexist = 1;
2465 else
2466 rtlpriv->btcoexist.btc_info.btcoexist = 0;
2467 value = hwinfo[RF_OPTION4];
2468 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
2469 rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
2470 } else {
2471 rtlpriv->btcoexist.btc_info.btcoexist = 0;
2472 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
2473 rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
2474 }
2475}
2476
/* Seed the BT-coexistence register defaults. */
void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* 0:Low, 1:High, 2:From Efuse. */
	rtlpriv->btcoexist.reg_bt_iso = 2;
	/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
	rtlpriv->btcoexist.reg_bt_sco = 3;
	/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
	/* NOTE(review): this immediately overwrites the reg_bt_sco value
	 * assigned above; the A-MPDU comment suggests a different field
	 * was intended — verify against the btcoexist struct definition.
	 */
	rtlpriv->btcoexist.reg_bt_sco = 0;
}
2488
2489void rtl8723be_bt_hw_init(struct ieee80211_hw *hw)
2490{
2491 struct rtl_priv *rtlpriv = rtl_priv(hw);
2492
2493 if (rtlpriv->cfg->ops->get_btc_status())
2494 rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
2495}
2496
/* PCI suspend hook — intentionally empty, nothing to save for this chip. */
void rtl8723be_suspend(struct ieee80211_hw *hw)
{
}
2500
/* PCI resume hook — intentionally empty, nothing to restore for this chip. */
void rtl8723be_resume(struct ieee80211_hw *hw)
{
}
2504
2505/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2506void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
2507 bool write_into_reg)
2508{
2509 struct rtl_priv *rtlpriv = rtl_priv(hw);
2510 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2511
2512 if (allow_all_da) /* Set BIT0 */
2513 rtlpci->receive_config |= RCR_AAP;
2514 else /* Clear BIT0 */
2515 rtlpci->receive_config &= ~RCR_AAP;
2516
2517 if (write_into_reg)
2518 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2519
2520 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2521 "receive_config = 0x%08X, write_into_reg =%d\n",
2522 rtlpci->receive_config, write_into_reg);
2523}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
new file mode 100644
index 000000000000..b7449a9b57e4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -0,0 +1,64 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
#ifndef __RTL8723BE_HW_H__
#define __RTL8723BE_HW_H__

/* Public interface of the rtl8723be HAL (hw.c): EEPROM/efuse parsing,
 * interrupt and register management, rate control, security CAM access
 * and BT-coexistence setup.
 */

void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);

void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
				    u32 *p_inta, u32 *p_intb);
int rtl8723be_hw_init(struct ieee80211_hw *hw);
void rtl8723be_card_disable(struct ieee80211_hw *hw);
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
void rtl8723be_disable_interrupt(struct ieee80211_hw *hw);
int rtl8723be_set_network_type(struct ieee80211_hw *hw,
			       enum nl80211_iftype type);
void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci);
void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw);
void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw);
void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
				     u32 add_msr, u32 rm_msr);
void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u8 rssi_level);
void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw);
void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
		       u8 *p_macaddr, bool is_group, u8 enc_algo,
		       bool is_wepkey, bool clear_all);
void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
					      bool autoload_fail, u8 *hwinfo);
void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
void rtl8723be_suspend(struct ieee80211_hw *hw);
void rtl8723be_resume(struct ieee80211_hw *hw);
void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
				  bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
new file mode 100644
index 000000000000..cb931a38dc48
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
@@ -0,0 +1,153 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../pci.h"
28#include "reg.h"
29#include "led.h"
30
31static void _rtl8723be_init_led(struct ieee80211_hw *hw, struct rtl_led *pled,
32 enum rtl_led_pin ledpin)
33{
34 pled->hw = hw;
35 pled->ledpin = ledpin;
36 pled->ledon = false;
37}
38
39void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
40{
41 u8 ledcfg;
42 struct rtl_priv *rtlpriv = rtl_priv(hw);
43
44 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
45 "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
46
47 switch (pled->ledpin) {
48 case LED_PIN_GPIO0:
49 break;
50 case LED_PIN_LED0:
51 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
52 ledcfg &= ~BIT(6);
53 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
54 break;
55 case LED_PIN_LED1:
56 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
57 rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
58 break;
59 default:
60 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
61 "switch case not process\n");
62 break;
63 }
64 pled->ledon = true;
65}
66
/* Turn @pled off in software.  LED0 honours the open-drain board flag
 * (pcipriv->ledctl.led_opendrain); GPIO0-routed LEDs are left alone.
 */
void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	u8 ledcfg;

	RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
		 "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);

	ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);

	switch (pled->ledpin) {
	case LED_PIN_GPIO0:
		break;
	case LED_PIN_LED0:
		ledcfg &= 0xf0;
		if (pcipriv->ledctl.led_opendrain) {
			ledcfg &= 0x90; /* Set to software control. */
			rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
			/* Route the pin back to GPIO: clear mux bit 0. */
			ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
			ledcfg &= 0xFE;
			rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
		} else {
			ledcfg &= ~BIT(6);
			rtl_write_byte(rtlpriv, REG_LEDCFG2,
				       (ledcfg | BIT(3) | BIT(5)));
		}
		break;
	case LED_PIN_LED1:
		ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
		ledcfg &= 0x10; /* Set to software control. */
		rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));

		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not processed\n");
		break;
	}
	pled->ledon = false;
}
108
109void rtl8723be_init_sw_leds(struct ieee80211_hw *hw)
110{
111 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
112 _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
113 _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
114}
115
116static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw,
117 enum led_ctl_mode ledaction)
118{
119 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
120 struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
121 switch (ledaction) {
122 case LED_CTL_POWER_ON:
123 case LED_CTL_LINK:
124 case LED_CTL_NO_LINK:
125 rtl8723be_sw_led_on(hw, pled0);
126 break;
127 case LED_CTL_POWER_OFF:
128 rtl8723be_sw_led_off(hw, pled0);
129 break;
130 default:
131 break;
132 }
133}
134
135void rtl8723be_led_control(struct ieee80211_hw *hw,
136 enum led_ctl_mode ledaction)
137{
138 struct rtl_priv *rtlpriv = rtl_priv(hw);
139 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
140
141 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
142 (ledaction == LED_CTL_TX ||
143 ledaction == LED_CTL_RX ||
144 ledaction == LED_CTL_SITE_SURVEY ||
145 ledaction == LED_CTL_LINK ||
146 ledaction == LED_CTL_NO_LINK ||
147 ledaction == LED_CTL_START_TO_LINK ||
148 ledaction == LED_CTL_POWER_ON)) {
149 return;
150 }
151 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
152 _rtl8723be_sw_led_control(hw, ledaction);
153}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
new file mode 100644
index 000000000000..c57de379ee8d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
@@ -0,0 +1,35 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
/* Software-LED interface for the RTL8723BE (implemented in led.c). */
#ifndef __RTL8723BE_LED_H__
#define __RTL8723BE_LED_H__

/* Initialise the sw_led0/sw_led1 descriptors in the PCI private data. */
void rtl8723be_init_sw_leds(struct ieee80211_hw *hw);
/* Directly drive one LED on/off through the LEDCFG registers. */
void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
/* Main entry used by the core; filters actions while the RF is off. */
void rtl8723be_led_control(struct ieee80211_hw *hw,
			   enum led_ctl_mode ledaction);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
new file mode 100644
index 000000000000..1575ef9ece9f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
@@ -0,0 +1,2156 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../pci.h"
28#include "../ps.h"
29#include "../core.h"
30#include "reg.h"
31#include "def.h"
32#include "phy.h"
33#include "../rtl8723com/phy_common.h"
34#include "rf.h"
35#include "dm.h"
36#include "table.h"
37#include "trx.h"
38
39static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
40static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
41 u8 configtype);
42static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
43 u8 channel, u8 *stage,
44 u8 *step, u32 *delay);
45static bool _rtl8723be_check_condition(struct ieee80211_hw *hw,
46 const u32 condition)
47{
48 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
49 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
50 u32 _board = rtlefuse->board_type; /*need efuse define*/
51 u32 _interface = rtlhal->interface;
52 u32 _platform = 0x08;/*SupportPlatform */
53 u32 cond = condition;
54
55 if (condition == 0xCDCDCDCD)
56 return true;
57
58 cond = condition & 0xFF;
59 if ((_board & cond) == 0 && cond != 0x1F)
60 return false;
61
62 cond = condition & 0xFF00;
63 cond = cond >> 8;
64 if ((_interface & cond) == 0 && cond != 0x07)
65 return false;
66
67 cond = condition & 0xFF0000;
68 cond = cond >> 16;
69 if ((_platform & cond) == 0 && cond != 0x0F)
70 return false;
71 return true;
72}
73
74static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
75{
76 struct rtl_priv *rtlpriv = rtl_priv(hw);
77 u32 i;
78 u32 arraylength;
79 u32 *ptrarray;
80
81 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
82 arraylength = RTL8723BEMAC_1T_ARRAYLEN;
83 ptrarray = RTL8723BEMAC_1T_ARRAY;
84 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
85 "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength);
86 for (i = 0; i < arraylength; i = i + 2)
87 rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
88 return true;
89}
90
/* Load a baseband configuration table into the hardware.
 *
 * @configtype selects either the PHY-register table or the AGC table.
 * Both tables are flat (address, value) pair arrays in which a first
 * element >= 0xcdcdcdcd starts a conditional branch: the word encodes a
 * board/interface/platform condition (see _rtl8723be_check_condition())
 * and the pairs that follow, up to a 0xDEAD/0xCDEF/0xCDCD terminator,
 * are applied only when the condition matches.
 *
 * Always returns true.
 */
static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
						     u8 configtype)
{
	/* Advance to the next (address, value) pair of the table. */
	#define READ_NEXT_PAIR(v1, v2, i) \
	do { \
		i += 2; \
		v1 = array_table[i];\
		v2 = array_table[i+1]; \
	} while (0)

	int i;
	u32 *array_table;
	u16 arraylen;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 v1 = 0, v2 = 0;

	if (configtype == BASEBAND_CONFIG_PHY_REG) {
		arraylen = RTL8723BEPHY_REG_1TARRAYLEN;
		array_table = RTL8723BEPHY_REG_1TARRAY;

		for (i = 0; i < arraylen; i = i + 2) {
			v1 = array_table[i];
			v2 = array_table[i+1];
			if (v1 < 0xcdcdcdcd) {
				/* Plain pair: write it (rtl_bb_delay also
				 * handles the 0xfe delay markers). */
				rtl_bb_delay(hw, v1, v2);
			} else {/*This line is the start line of branch.*/
				if (!_rtl8723be_check_condition(hw, array_table[i])) {
					/*Discard the following (offset, data) pairs*/
					READ_NEXT_PAIR(v1, v2, i);
					while (v2 != 0xDEAD &&
					       v2 != 0xCDEF &&
					       v2 != 0xCDCD &&
					       i < arraylen - 2) {
						READ_NEXT_PAIR(v1, v2, i);
					}
					i -= 2; /* prevent from for-loop += 2*/
					/* Configure matched pairs and
					 * skip to end of if-else.
					 */
				} else {
					READ_NEXT_PAIR(v1, v2, i);
					while (v2 != 0xDEAD &&
					       v2 != 0xCDEF &&
					       v2 != 0xCDCD &&
					       i < arraylen - 2) {
						rtl_bb_delay(hw,
							     v1, v2);
						READ_NEXT_PAIR(v1, v2, i);
					}

					/* Skip the remainder of the branch. */
					while (v2 != 0xDEAD && i < arraylen - 2)
						READ_NEXT_PAIR(v1, v2, i);
				}
			}
		}
	} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
		arraylen = RTL8723BEAGCTAB_1TARRAYLEN;
		array_table = RTL8723BEAGCTAB_1TARRAY;

		for (i = 0; i < arraylen; i = i + 2) {
			v1 = array_table[i];
			v2 = array_table[i+1];
			if (v1 < 0xCDCDCDCD) {
				rtl_set_bbreg(hw, array_table[i],
					      MASKDWORD,
					      array_table[i + 1]);
				udelay(1);
				continue;
			} else {/*This line is the start line of branch.*/
				if (!_rtl8723be_check_condition(hw, array_table[i])) {
					/* Discard the following
					 * (offset, data) pairs
					 */
					READ_NEXT_PAIR(v1, v2, i);
					while (v2 != 0xDEAD &&
					       v2 != 0xCDEF &&
					       v2 != 0xCDCD &&
					       i < arraylen - 2) {
						READ_NEXT_PAIR(v1, v2, i);
					}
					i -= 2; /* prevent from for-loop += 2*/
					/*Configure matched pairs and
					 *skip to end of if-else.
					 */
				} else {
					READ_NEXT_PAIR(v1, v2, i);
					while (v2 != 0xDEAD &&
					       v2 != 0xCDEF &&
					       v2 != 0xCDCD &&
					       i < arraylen - 2) {
						rtl_set_bbreg(hw, array_table[i],
							      MASKDWORD,
							      array_table[i + 1]);
						udelay(1);
						READ_NEXT_PAIR(v1, v2, i);
					}

					while (v2 != 0xDEAD && i < arraylen - 2)
						READ_NEXT_PAIR(v1, v2, i);
				}
			}
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 "The agctab_array_table[0] is "
				 "%x Rtl818EEPHY_REGArray[1] is %x\n",
				 array_table[i], array_table[i + 1]);
		}
	}
	return true;
}
200
201static u8 _rtl8723be_get_rate_section_index(u32 regaddr)
202{
203 u8 index = 0;
204
205 switch (regaddr) {
206 case RTXAGC_A_RATE18_06:
207 case RTXAGC_B_RATE18_06:
208 index = 0;
209 break;
210 case RTXAGC_A_RATE54_24:
211 case RTXAGC_B_RATE54_24:
212 index = 1;
213 break;
214 case RTXAGC_A_CCK1_MCS32:
215 case RTXAGC_B_CCK1_55_MCS32:
216 index = 2;
217 break;
218 case RTXAGC_B_CCK11_A_CCK2_11:
219 index = 3;
220 break;
221 case RTXAGC_A_MCS03_MCS00:
222 case RTXAGC_B_MCS03_MCS00:
223 index = 4;
224 break;
225 case RTXAGC_A_MCS07_MCS04:
226 case RTXAGC_B_MCS07_MCS04:
227 index = 5;
228 break;
229 case RTXAGC_A_MCS11_MCS08:
230 case RTXAGC_B_MCS11_MCS08:
231 index = 6;
232 break;
233 case RTXAGC_A_MCS15_MCS12:
234 case RTXAGC_B_MCS15_MCS12:
235 index = 7;
236 break;
237 default:
238 regaddr &= 0xFFF;
239 if (regaddr >= 0xC20 && regaddr <= 0xC4C)
240 index = (u8) ((regaddr - 0xC20) / 4);
241 else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
242 index = (u8) ((regaddr - 0xE20) / 4);
243 break;
244 };
245 return index;
246}
247
248u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
249 u32 regaddr, u32 bitmask)
250{
251 struct rtl_priv *rtlpriv = rtl_priv(hw);
252 u32 original_value, readback_value, bitshift;
253 unsigned long flags;
254
255 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
256 "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
257 regaddr, rfpath, bitmask);
258
259 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
260
261 original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
262 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
263 readback_value = (original_value & bitmask) >> bitshift;
264
265 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
266
267 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
268 "regaddr(%#x), rfpath(%#x), "
269 "bitmask(%#x), original_value(%#x)\n",
270 regaddr, rfpath, bitmask, original_value);
271
272 return readback_value;
273}
274
275void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
276 u32 regaddr, u32 bitmask, u32 data)
277{
278 struct rtl_priv *rtlpriv = rtl_priv(hw);
279 u32 original_value, bitshift;
280 unsigned long flags;
281
282 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
283 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
284 regaddr, bitmask, data, path);
285
286 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
287
288 if (bitmask != RFREG_OFFSET_MASK) {
289 original_value = rtl8723_phy_rf_serial_read(hw, path,
290 regaddr);
291 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
292 data = ((original_value & (~bitmask)) |
293 (data << bitshift));
294 }
295
296 rtl8723_phy_rf_serial_write(hw, path, regaddr, data);
297
298 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
299
300 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
301 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
302 regaddr, bitmask, data, path);
303}
304
305bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw)
306{
307 struct rtl_priv *rtlpriv = rtl_priv(hw);
308 bool rtstatus = _rtl8723be_phy_config_mac_with_headerfile(hw);
309
310 rtl_write_byte(rtlpriv, 0x04CA, 0x0B);
311 return rtstatus;
312}
313
/* Baseband bring-up: enable BB/RF power and clocks, load the parameter
 * file, then program the crystal calibration value read from efuse.
 * Returns the status of the parameter-file load.
 */
bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw)
{
	bool rtstatus = true;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u16 regval;
	u8 reg_hwparafile = 1;
	u32 tmp;
	u8 crystalcap = rtlpriv->efuse.crystalcap;
	rtl8723_phy_init_bb_rf_reg_def(hw);
	/* Enable the BB and PCIe function blocks before touching BB regs. */
	regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
	rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
		       regval | BIT(13) | BIT(0) | BIT(1));

	rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
		       FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
		       FEN_BB_GLB_RSTN | FEN_BBRSTB);
	/* NOTE(review): 0x4c / BIT(23) is undocumented here -- presumably a
	 * vendor-required enable bit; confirm against the reference code. */
	tmp = rtl_read_dword(rtlpriv, 0x4c);
	rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23));

	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);

	if (reg_hwparafile == 1)
		rtstatus = _rtl8723be_phy_bb8723b_config_parafile(hw);

	/* Crystal cap is 6 bits wide; mirror it into both halves of the
	 * 12-bit field at REG_MAC_PHY_CTRL[23:12]. */
	crystalcap = crystalcap & 0x3F;
	rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
		      (crystalcap | crystalcap << 6));

	return rtstatus;
}
345
/* RF initialisation: the 8723BE uses the RF6052; all work is delegated. */
bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw)
{
	return rtl8723be_phy_rf6052_config(hw);
}
350
351static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr,
352 u32 data, enum radio_path rfpath,
353 u32 regaddr)
354{
355 if (addr == 0xfe || addr == 0xffe) {
356 mdelay(50);
357 } else {
358 rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
359 udelay(1);
360 }
361}
362
/* Write one radio-A table entry.  The physet mask is derived from the
 * "RF content" selector; note 0x1000 & 0xE000 == 0, so the mask is
 * currently always zero -- kept as-is for parity with the vendor code.
 */
static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw,
					 u32 addr, u32 data)
{
	u32 content = 0x1000; /*RF Content: radio_a_txt*/
	u32 maskforphyset = (u32)(content & 0xE000);

	_rtl8723be_config_rf_reg(hw, addr, data, RF90_PATH_A,
				 addr | maskforphyset);
}
372
373static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
374{
375 struct rtl_priv *rtlpriv = rtl_priv(hw);
376 struct rtl_phy *rtlphy = &(rtlpriv->phy);
377
378 u8 band, path, txnum, section;
379
380 for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
381 for (path = 0; path < TX_PWR_BY_RATE_NUM_RF; ++path)
382 for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
383 for (section = 0;
384 section < TX_PWR_BY_RATE_NUM_SECTION;
385 ++section)
386 rtlphy->tx_power_by_rate_offset[band]
387 [path][txnum][section] = 0;
388}
389
390static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band,
391 u8 path, u8 rate_section,
392 u8 txnum, u8 value)
393{
394 struct rtl_priv *rtlpriv = rtl_priv(hw);
395 struct rtl_phy *rtlphy = &(rtlpriv->phy);
396
397 if (path > RF90_PATH_D) {
398 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
399 "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
400 path);
401 return;
402 }
403
404 if (band == BAND_ON_2_4G) {
405 switch (rate_section) {
406 case CCK:
407 rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
408 break;
409 case OFDM:
410 rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
411 break;
412 case HT_MCS0_MCS7:
413 rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
414 break;
415 case HT_MCS8_MCS15:
416 rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
417 break;
418 default:
419 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
420 "Invalid RateSection %d in Band 2.4G, Rf Path"
421 " %d, %dTx in PHY_SetTxPowerByRateBase()\n",
422 rate_section, path, txnum);
423 break;
424 };
425 } else {
426 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
427 "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
428 band);
429 }
430}
431
432static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path,
433 u8 txnum, u8 rate_section)
434{
435 struct rtl_priv *rtlpriv = rtl_priv(hw);
436 struct rtl_phy *rtlphy = &(rtlpriv->phy);
437 u8 value = 0;
438 if (path > RF90_PATH_D) {
439 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
440 "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
441 path);
442 return 0;
443 }
444
445 if (band == BAND_ON_2_4G) {
446 switch (rate_section) {
447 case CCK:
448 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
449 break;
450 case OFDM:
451 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
452 break;
453 case HT_MCS0_MCS7:
454 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
455 break;
456 case HT_MCS8_MCS15:
457 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
458 break;
459 default:
460 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
461 "Invalid RateSection %d in Band 2.4G, Rf Path"
462 " %d, %dTx in PHY_GetTxPowerByRateBase()\n",
463 rate_section, path, txnum);
464 break;
465 };
466 } else {
467 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
468 "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
469 band);
470 }
471
472 return value;
473}
474
/* Extract the BCD-coded reference powers (tens in the high nibble) from
 * the raw PG words and register them as per-section bases per RF path.
 */
static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u16 raw_value = 0;
	u8 base = 0, path = 0;

	for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
		/* The CCK base lives in a different byte per path. */
		if (path == RF90_PATH_A) {
			raw_value = (u16) (rtlphy->tx_power_by_rate_offset
				[BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF;
			base = (raw_value >> 4) * 10 + (raw_value & 0xF);
			phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, CCK,
						   RF_1TX, base);
		} else if (path == RF90_PATH_B) {
			raw_value = (u16) (rtlphy->tx_power_by_rate_offset
				[BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF;
			base = (raw_value >> 4) * 10 + (raw_value & 0xF);
			phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
						   CCK, RF_1TX, base);
		}
		/* OFDM base: top byte of section 1. */
		raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
				   [path][RF_1TX][1] >> 24) & 0xFF;
		base = (raw_value >> 4) * 10 + (raw_value & 0xF);
		phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX,
					   base);

		/* HT MCS0-7 base: top byte of section 5. */
		raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
				   [path][RF_1TX][5] >> 24) & 0xFF;
		base = (raw_value >> 4) * 10 + (raw_value & 0xF);
		phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7,
					   RF_1TX, base);

		/* HT MCS8-15 (2TX) base: top byte of section 7. */
		raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
				   [path][RF_2TX][7] >> 24) & 0xFF;
		base = (raw_value >> 4) * 10 + (raw_value & 0xF);
		phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
					   HT_MCS8_MCS15, RF_2TX, base);
	}
}
515
516static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val)
517{
518 char i = 0;
519 u8 temp_value = 0;
520 u32 temp_data = 0;
521
522 for (i = 3; i >= 0; --i) {
523 if (i >= start && i <= end) {
524 /* Get the exact value */
525 temp_value = (u8) (*data >> (i * 8)) & 0xF;
526 temp_value += ((u8) ((*data >> (i*8 + 4)) & 0xF)) * 10;
527
528 /* Change the value to a relative value */
529 temp_value = (temp_value > base_val) ?
530 temp_value - base_val :
531 base_val - temp_value;
532 } else {
533 temp_value = (u8) (*data >> (i * 8)) & 0xFF;
534 }
535 temp_data <<= 8;
536 temp_data |= temp_value;
537 }
538 *data = temp_data;
539}
540
541static void conv_dbm_to_rel(struct ieee80211_hw *hw)
542{
543 struct rtl_priv *rtlpriv = rtl_priv(hw);
544 struct rtl_phy *rtlphy = &(rtlpriv->phy);
545 u8 base = 0, rfpath = RF90_PATH_A;
546
547 base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
548 RF_1TX, CCK);
549 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
550 [rfpath][RF_1TX][2]), 1, 1, base);
551 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
552 [rfpath][RF_1TX][3]), 1, 3, base);
553
554 base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
555 RF_1TX, OFDM);
556 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
557 [rfpath][RF_1TX][0]), 0, 3, base);
558 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
559 [rfpath][RF_1TX][1]), 0, 3, base);
560
561 base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
562 RF_1TX, HT_MCS0_MCS7);
563 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
564 [rfpath][RF_1TX][4]), 0, 3, base);
565 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
566 [rfpath][RF_1TX][5]), 0, 3, base);
567
568 base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
569 RF_2TX, HT_MCS8_MCS15);
570 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
571 [rfpath][RF_2TX][6]), 0, 3, base);
572
573 phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
574 [rfpath][RF_2TX][7]), 0, 3, base);
575
576 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
577 "<=== conv_dbm_to_rel()\n");
578}
579
/* Derive the per-section reference powers from the PG table, then
 * convert the stored absolute dBm words into relative offsets.
 */
static void _rtl8723be_phy_txpower_by_rate_configuration(
					struct ieee80211_hw *hw)
{
	_rtl8723be_phy_store_txpower_by_rate_base(hw);
	conv_dbm_to_rel(hw);
}
586
/* Load the PHY_REG, (optionally) PHY_REG_PG, and AGC tables in order.
 * Returns false on the first failing stage.
 */
static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	bool rtstatus;

	rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
						BASEBAND_CONFIG_PHY_REG);
	if (!rtstatus) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
		return false;
	}
	_rtl8723be_phy_init_tx_power_by_rate(hw);
	/* The PG (power-group) table is only parsed when efuse autoload
	 * succeeded; otherwise rtstatus keeps its previous (true) value
	 * so the check below passes. */
	if (!rtlefuse->autoload_failflag) {
		rtlphy->pwrgroup_cnt = 0;
		rtstatus = _rtl8723be_phy_config_bb_with_pgheaderfile(hw,
						BASEBAND_CONFIG_PHY_REG);
	}
	_rtl8723be_phy_txpower_by_rate_configuration(hw);
	if (!rtstatus) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
		return false;
	}
	rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
						BASEBAND_CONFIG_AGC_TAB);
	if (!rtstatus) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
		return false;
	}
	/* Cache whether CCK runs in high-power mode (BB bit 0x200). */
	rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
						RFPGA0_XA_HSSIPARAMETER2,
						0x200));
	return true;
}
622
623static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw,
624 u32 band, u32 rfpath,
625 u32 txnum, u32 regaddr,
626 u32 bitmask, u32 data)
627{
628 struct rtl_priv *rtlpriv = rtl_priv(hw);
629 struct rtl_phy *rtlphy = &(rtlpriv->phy);
630 u8 rate_section = _rtl8723be_get_rate_section_index(regaddr);
631
632 if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
633 RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
634 "Invalid Band %d\n", band);
635 return;
636 }
637
638 if (rfpath > TX_PWR_BY_RATE_NUM_RF) {
639 RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
640 "Invalid RfPath %d\n", rfpath);
641 return;
642 }
643 if (txnum > TX_PWR_BY_RATE_NUM_RF) {
644 RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
645 "Invalid TxNum %d\n", txnum);
646 return;
647 }
648 rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] =
649 data;
650}
651
/* Parse the PHY_REG_PG (power-group) table.  Entries come in groups of
 * six words (band, path, txnum, regaddr, bitmask, data); a first word
 * >= 0xcdcdcdcd starts a conditional section that is skipped when the
 * board condition does not match.
 * NOTE(review): the inner skip loop scans for a 0xDEAD terminator with
 * no explicit length bound -- it relies on the generated table always
 * being terminated; confirm against the table generator.
 */
static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
						       u8 configtype)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int i;
	u32 *phy_regarray_table_pg;
	u16 phy_regarray_pg_len;
	u32 v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;

	phy_regarray_pg_len = RTL8723BEPHY_REG_ARRAY_PGLEN;
	phy_regarray_table_pg = RTL8723BEPHY_REG_ARRAY_PG;

	if (configtype == BASEBAND_CONFIG_PHY_REG) {
		for (i = 0; i < phy_regarray_pg_len; i = i + 6) {
			v1 = phy_regarray_table_pg[i];
			v2 = phy_regarray_table_pg[i+1];
			v3 = phy_regarray_table_pg[i+2];
			v4 = phy_regarray_table_pg[i+3];
			v5 = phy_regarray_table_pg[i+4];
			v6 = phy_regarray_table_pg[i+5];

			if (v1 < 0xcdcdcdcd) {
				/* 0xfe/0xffe are delay markers, not data. */
				if (phy_regarray_table_pg[i] == 0xfe ||
				    phy_regarray_table_pg[i] == 0xffe)
					mdelay(50);
				else
					_rtl8723be_store_tx_power_by_rate(hw,
							v1, v2, v3, v4, v5, v6);
				continue;
			} else {
				/*don't need the hw_body*/
				if (!_rtl8723be_check_condition(hw,
						phy_regarray_table_pg[i])) {
					i += 2; /* skip the pair of expression*/
					v1 = phy_regarray_table_pg[i];
					v2 = phy_regarray_table_pg[i+1];
					v3 = phy_regarray_table_pg[i+2];
					while (v2 != 0xDEAD) {
						i += 3;
						v1 = phy_regarray_table_pg[i];
						v2 = phy_regarray_table_pg[i+1];
						v3 = phy_regarray_table_pg[i+2];
					}
				}
			}
		}
	} else {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
			 "configtype != BaseBand_Config_PHY_REG\n");
	}
	return true;
}
704
/* Load the radio-A RF register table.  Same conditional-branch encoding
 * as the baseband tables: a first word >= 0xcdcdcdcd starts a section
 * guarded by _rtl8723be_check_condition(), terminated by the
 * 0xDEAD/0xCDEF/0xCDCD markers.  HP OEM boards get one extra tweak.
 * Always returns true (only path A exists on this 1T chip).
 */
bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
					     enum radio_path rfpath)
{
	/* Advance to the next (offset, data) pair of the table. */
	#define READ_NEXT_RF_PAIR(v1, v2, i) \
	do { \
		i += 2; \
		v1 = radioa_array_table[i]; \
		v2 = radioa_array_table[i+1]; \
	} while (0)

	int i;
	bool rtstatus = true;
	u32 *radioa_array_table;
	u16 radioa_arraylen;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u32 v1 = 0, v2 = 0;

	radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN;
	radioa_array_table = RTL8723BE_RADIOA_1TARRAY;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
	rtstatus = true;
	switch (rfpath) {
	case RF90_PATH_A:
		for (i = 0; i < radioa_arraylen; i = i + 2) {
			v1 = radioa_array_table[i];
			v2 = radioa_array_table[i+1];
			if (v1 < 0xcdcdcdcd) {
				_rtl8723be_config_rf_radio_a(hw, v1, v2);
			} else { /*This line is the start line of branch.*/
				if (!_rtl8723be_check_condition(hw,
						radioa_array_table[i])) {
					/* Discard the following
					 * (offset, data) pairs
					 */
					READ_NEXT_RF_PAIR(v1, v2, i);
					while (v2 != 0xDEAD &&
					       v2 != 0xCDEF &&
					       v2 != 0xCDCD &&
					       i < radioa_arraylen - 2)
						READ_NEXT_RF_PAIR(v1, v2, i);
					i -= 2; /* prevent from for-loop += 2*/
				} else {
					/* Configure matched pairs
					 * and skip to end of if-else.
					 */
					READ_NEXT_RF_PAIR(v1, v2, i);
					while (v2 != 0xDEAD &&
					       v2 != 0xCDEF &&
					       v2 != 0xCDCD &&
					       i < radioa_arraylen - 2) {
						_rtl8723be_config_rf_radio_a(hw,
								v1, v2);
						READ_NEXT_RF_PAIR(v1, v2, i);
					}

					/* Skip to the branch terminator. */
					while (v2 != 0xDEAD &&
					       i < radioa_arraylen - 2) {
						READ_NEXT_RF_PAIR(v1, v2, i);
					}
				}
			}
		}

		/* HP OEM boards need an extra RF 0x52 adjustment. */
		if (rtlhal->oem_id == RT_CID_819X_HP)
			_rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD);

		break;
	case RF90_PATH_B:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not process\n");
		break;
	case RF90_PATH_C:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not process\n");
		break;
	case RF90_PATH_D:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not process\n");
		break;
	}
	return true;
}
790
791void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
792{
793 struct rtl_priv *rtlpriv = rtl_priv(hw);
794 struct rtl_phy *rtlphy = &(rtlpriv->phy);
795
796 rtlphy->default_initialgain[0] =
797 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
798 rtlphy->default_initialgain[1] =
799 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
800 rtlphy->default_initialgain[2] =
801 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
802 rtlphy->default_initialgain[3] =
803 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
804
805 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
806 "Default initial gain (c50 = 0x%x, "
807 "c58 = 0x%x, c60 = 0x%x, c68 = 0x%x\n",
808 rtlphy->default_initialgain[0],
809 rtlphy->default_initialgain[1],
810 rtlphy->default_initialgain[2],
811 rtlphy->default_initialgain[3]);
812
813 rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
814 MASKBYTE0);
815 rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
816 MASKDWORD);
817
818 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
819 "Default framesync (0x%x) = 0x%x\n",
820 ROFDM0_RXDETECTOR3, rtlphy->framesync);
821}
822
/* Report the strongest configured TX power (in dBm) across CCK, legacy
 * OFDM, and HT rates for the current channel.
 */
void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u8 txpwr_level;
	long txpwr_dbm;

	txpwr_level = rtlphy->cur_cck_txpwridx;
	txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
						 txpwr_level);
	txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
	if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
	    txpwr_dbm)
		txpwr_dbm =
		    rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
						 txpwr_level);
	/* NOTE(review): the HT pass reuses cur_ofdm24g_txpwridx rather
	 * than a dedicated HT index -- presumably intentional for this
	 * chip; confirm against the vendor reference. */
	txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
	if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
					 txpwr_level) > txpwr_dbm)
		txpwr_dbm =
		    rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
						 txpwr_level);
	*powerlevel = txpwr_dbm;
}
847
/* Map a descriptor rate onto its tx_power_by_rate_offset section.
 * CCK 2M/5.5M map differently per RF path (path A shares the 11M slot,
 * path B the 1M slot); legacy OFDM and HT rates use fixed 4-rate groups.
 */
static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
							  u8 rate)
{
	u8 rate_section = 0;

	switch (rate) {
	case DESC92C_RATE1M:
		rate_section = 2;
		break;
	case DESC92C_RATE2M:
	case DESC92C_RATE5_5M:
		/* Per-path asymmetry of the CCK byte layout. */
		if (path == RF90_PATH_A)
			rate_section = 3;
		else if (path == RF90_PATH_B)
			rate_section = 2;
		break;
	case DESC92C_RATE11M:
		rate_section = 3;
		break;
	case DESC92C_RATE6M:
	case DESC92C_RATE9M:
	case DESC92C_RATE12M:
	case DESC92C_RATE18M:
		rate_section = 0;
		break;
	case DESC92C_RATE24M:
	case DESC92C_RATE36M:
	case DESC92C_RATE48M:
	case DESC92C_RATE54M:
		rate_section = 1;
		break;
	case DESC92C_RATEMCS0:
	case DESC92C_RATEMCS1:
	case DESC92C_RATEMCS2:
	case DESC92C_RATEMCS3:
		rate_section = 4;
		break;
	case DESC92C_RATEMCS4:
	case DESC92C_RATEMCS5:
	case DESC92C_RATEMCS6:
	case DESC92C_RATEMCS7:
		rate_section = 5;
		break;
	case DESC92C_RATEMCS8:
	case DESC92C_RATEMCS9:
	case DESC92C_RATEMCS10:
	case DESC92C_RATEMCS11:
		rate_section = 6;
		break;
	case DESC92C_RATEMCS12:
	case DESC92C_RATEMCS13:
	case DESC92C_RATEMCS14:
	case DESC92C_RATEMCS15:
		rate_section = 7;
		break;
	default:
		/* NOTE(review): RT_ASSERT is passed a 'true' condition --
		 * verify the macro's semantics (it may log unconditionally
		 * when the condition holds). */
		RT_ASSERT(true, "Rate_Section is Illegal\n");
		break;
	}
	return rate_section;
}
909
910static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
911 enum band_type band,
912 enum radio_path rfpath, u8 rate)
913{
914 struct rtl_priv *rtlpriv = rtl_priv(hw);
915 struct rtl_phy *rtlphy = &(rtlpriv->phy);
916 u8 shift = 0, rate_section, tx_num;
917 char tx_pwr_diff = 0;
918
919 rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath,
920 rate);
921 tx_num = RF_TX_NUM_NONIMPLEMENT;
922
923 if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
924 if (rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS15)
925 tx_num = RF_2TX;
926 else
927 tx_num = RF_1TX;
928 }
929
930 switch (rate) {
931 case DESC92C_RATE6M:
932 case DESC92C_RATE24M:
933 case DESC92C_RATEMCS0:
934 case DESC92C_RATEMCS4:
935 case DESC92C_RATEMCS8:
936 case DESC92C_RATEMCS12:
937 shift = 0;
938 break;
939 case DESC92C_RATE1M:
940 case DESC92C_RATE2M:
941 case DESC92C_RATE9M:
942 case DESC92C_RATE36M:
943 case DESC92C_RATEMCS1:
944 case DESC92C_RATEMCS5:
945 case DESC92C_RATEMCS9:
946 case DESC92C_RATEMCS13:
947 shift = 8;
948 break;
949 case DESC92C_RATE5_5M:
950 case DESC92C_RATE12M:
951 case DESC92C_RATE48M:
952 case DESC92C_RATEMCS2:
953 case DESC92C_RATEMCS6:
954 case DESC92C_RATEMCS10:
955 case DESC92C_RATEMCS14:
956 shift = 16;
957 break;
958 case DESC92C_RATE11M:
959 case DESC92C_RATE18M:
960 case DESC92C_RATE54M:
961 case DESC92C_RATEMCS3:
962 case DESC92C_RATEMCS7:
963 case DESC92C_RATEMCS11:
964 case DESC92C_RATEMCS15:
965 shift = 24;
966 break;
967 default:
968 RT_ASSERT(true, "Rate_Section is Illegal\n");
969 break;
970 }
971 tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num]
972 [rate_section] >> shift) & 0xff;
973
974 return tx_pwr_diff;
975}
976
977static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
978 u8 rate, u8 bandwidth, u8 channel)
979{
980 struct rtl_priv *rtlpriv = rtl_priv(hw);
981 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
982 u8 index = (channel - 1);
983 u8 txpower;
984 u8 power_diff_byrate = 0;
985
986 if (channel > 14 || channel < 1) {
987 index = 0;
988 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
989 "Illegal channel!\n");
990 }
991 if (RTL8723E_RX_HAL_IS_CCK_RATE(rate))
992 txpower = rtlefuse->txpwrlevel_cck[path][index];
993 else if (DESC92C_RATE6M <= rate)
994 txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
995 else
996 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
997 "invalid rate\n");
998
999 if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
1000 !RTL8723E_RX_HAL_IS_CCK_RATE(rate))
1001 txpower += rtlefuse->txpwr_legacyhtdiff[0][TX_1S];
1002
1003 if (bandwidth == HT_CHANNEL_WIDTH_20) {
1004 if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
1005 txpower += rtlefuse->txpwr_ht20diff[0][TX_1S];
1006 if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
1007 txpower += rtlefuse->txpwr_ht20diff[0][TX_2S];
1008 } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
1009 if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
1010 txpower += rtlefuse->txpwr_ht40diff[0][TX_1S];
1011 if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
1012 txpower += rtlefuse->txpwr_ht40diff[0][TX_2S];
1013 }
1014 if (rtlefuse->eeprom_regulatory != 2)
1015 power_diff_byrate = _rtl8723be_get_txpower_by_rate(hw,
1016 BAND_ON_2_4G,
1017 path, rate);
1018
1019 txpower += power_diff_byrate;
1020
1021 if (txpower > MAX_POWER_INDEX)
1022 txpower = MAX_POWER_INDEX;
1023
1024 return txpower;
1025}
1026
/* Write @power_index into the BB TX-AGC register byte that controls
 * @rate on RF path A.  Each 32-bit TXAGC register carries four rates,
 * one per byte (MASKBYTE0..3).  Only path A is implemented; any other
 * path is rejected with a trace message.
 */
static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
					     u8 power_index, u8 path, u8 rate)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	if (path == RF90_PATH_A) {
		switch (rate) {
		/* CCK rates: 1M sits in A_CCK1_MCS32, 2/5.5/11M share
		 * the B_CCK11_A_CCK2_11 register.
		 */
		case DESC92C_RATE1M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_CCK1_MCS32,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATE2M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATE5_5M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
					       MASKBYTE2, power_index);
			break;
		case DESC92C_RATE11M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
					       MASKBYTE3, power_index);
			break;
		/* Legacy OFDM rates, four per register */
		case DESC92C_RATE6M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
					       MASKBYTE0, power_index);
			break;
		case DESC92C_RATE9M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATE12M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
					       MASKBYTE2, power_index);
			break;
		case DESC92C_RATE18M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
					       MASKBYTE3, power_index);
			break;
		case DESC92C_RATE24M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
					       MASKBYTE0, power_index);
			break;
		case DESC92C_RATE36M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATE48M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
					       MASKBYTE2, power_index);
			break;
		case DESC92C_RATE54M:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
					       MASKBYTE3, power_index);
			break;
		/* HT MCS0-11, four per register.
		 * NOTE(review): MCS12-15 have no cases here and hit the
		 * default trace — presumably because 8723BE is 1T1R;
		 * confirm against the datasheet if 2-stream rates matter.
		 */
		case DESC92C_RATEMCS0:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
					       MASKBYTE0, power_index);
			break;
		case DESC92C_RATEMCS1:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATEMCS2:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
					       MASKBYTE2, power_index);
			break;
		case DESC92C_RATEMCS3:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
					       MASKBYTE3, power_index);
			break;
		case DESC92C_RATEMCS4:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
					       MASKBYTE0, power_index);
			break;
		case DESC92C_RATEMCS5:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATEMCS6:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
					       MASKBYTE2, power_index);
			break;
		case DESC92C_RATEMCS7:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
					       MASKBYTE3, power_index);
			break;
		case DESC92C_RATEMCS8:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
					       MASKBYTE0, power_index);
			break;
		case DESC92C_RATEMCS9:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
					       MASKBYTE1, power_index);
			break;
		case DESC92C_RATEMCS10:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
					       MASKBYTE2, power_index);
			break;
		case DESC92C_RATEMCS11:
			rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
					       MASKBYTE3, power_index);
			break;
		default:
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "Invalid Rate!!\n");
			break;
		}
	} else {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
	}
}
1138
1139void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
1140{
1141 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1142 u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M,
1143 DESC92C_RATE5_5M, DESC92C_RATE11M};
1144 u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M,
1145 DESC92C_RATE12M, DESC92C_RATE18M,
1146 DESC92C_RATE24M, DESC92C_RATE36M,
1147 DESC92C_RATE48M, DESC92C_RATE54M};
1148 u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1,
1149 DESC92C_RATEMCS2, DESC92C_RATEMCS3,
1150 DESC92C_RATEMCS4, DESC92C_RATEMCS5,
1151 DESC92C_RATEMCS6, DESC92C_RATEMCS7};
1152 u8 i, size;
1153 u8 power_index;
1154
1155 if (!rtlefuse->txpwr_fromeprom)
1156 return;
1157
1158 size = sizeof(cck_rates) / sizeof(u8);
1159 for (i = 0; i < size; i++) {
1160 power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
1161 cck_rates[i],
1162 rtl_priv(hw)->phy.current_chan_bw,
1163 channel);
1164 _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
1165 cck_rates[i]);
1166 }
1167 size = sizeof(ofdm_rates) / sizeof(u8);
1168 for (i = 0; i < size; i++) {
1169 power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
1170 ofdm_rates[i],
1171 rtl_priv(hw)->phy.current_chan_bw,
1172 channel);
1173 _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
1174 ofdm_rates[i]);
1175 }
1176 size = sizeof(ht_rates_1t) / sizeof(u8);
1177 for (i = 0; i < size; i++) {
1178 power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
1179 ht_rates_1t[i],
1180 rtl_priv(hw)->phy.current_chan_bw,
1181 channel);
1182 _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
1183 ht_rates_1t[i]);
1184 }
1185}
1186
1187void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1188{
1189 struct rtl_priv *rtlpriv = rtl_priv(hw);
1190 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1191 enum io_type iotype;
1192
1193 if (!is_hal_stop(rtlhal)) {
1194 switch (operation) {
1195 case SCAN_OPT_BACKUP:
1196 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1197 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
1198 (u8 *)&iotype);
1199 break;
1200 case SCAN_OPT_RESTORE:
1201 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1202 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
1203 (u8 *)&iotype);
1204 break;
1205 default:
1206 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1207 "Unknown Scan Backup operation.\n");
1208 break;
1209 }
1210 }
1211}
1212
/* Apply rtlphy->current_chan_bw to the hardware: first the MAC-side
 * bandwidth opmode and RRSR sub-channel field, then the BB FPGA/CCK/OFDM
 * sideband registers, and finally the RF6052 bandwidth.  Clears
 * set_bwmode_inprogress when done (or immediately if the HAL is stopped).
 */
void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u8 reg_bw_opmode;
	u8 reg_prsr_rsc;

	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
		 "Switch to %s bandwidth\n",
		 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
		 "20MHz" : "40MHz");

	if (is_hal_stop(rtlhal)) {
		rtlphy->set_bwmode_inprogress = false;
		return;
	}

	/* MAC register stage: read-modify-write the opmode byte and the
	 * response-rate sub-channel byte (REG_RRSR + 2).
	 */
	reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
	reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);

	switch (rtlphy->current_chan_bw) {
	case HT_CHANNEL_WIDTH_20:
		reg_bw_opmode |= BW_OPMODE_20MHZ;
		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		reg_bw_opmode &= ~BW_OPMODE_20MHZ;
		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
		/* Encode the primary-channel offset into bits 5-6 */
		reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
			       (mac->cur_40_prime_sc << 5);
		rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
		break;
	}

	/* BB register stage: RF mode, CCK sideband, OFDM LSTF and 0x818 */
	switch (rtlphy->current_chan_bw) {
	case HT_CHANNEL_WIDTH_20:
		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
		rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
			      (mac->cur_40_prime_sc >> 1));
		rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
		rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
			      (mac->cur_40_prime_sc ==
			       HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
		break;
	}
	rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
	rtlphy->set_bwmode_inprogress = false;
	RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
1277
1278void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
1279 enum nl80211_channel_type ch_type)
1280{
1281 struct rtl_priv *rtlpriv = rtl_priv(hw);
1282 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1283 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1284 u8 tmp_bw = rtlphy->current_chan_bw;
1285
1286 if (rtlphy->set_bwmode_inprogress)
1287 return;
1288 rtlphy->set_bwmode_inprogress = true;
1289 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1290 rtl8723be_phy_set_bw_mode_callback(hw);
1291 } else {
1292 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1293 "false driver sleep or unload\n");
1294 rtlphy->set_bwmode_inprogress = false;
1295 rtlphy->current_chan_bw = tmp_bw;
1296 }
1297}
1298
1299void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
1300{
1301 struct rtl_priv *rtlpriv = rtl_priv(hw);
1302 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1303 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1304 u32 delay;
1305
1306 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
1307 "switch to channel%d\n", rtlphy->current_channel);
1308 if (is_hal_stop(rtlhal))
1309 return;
1310 do {
1311 if (!rtlphy->sw_chnl_inprogress)
1312 break;
1313 if (!rtl8723be_phy_sw_chn_step_by_step(hw,
1314 rtlphy->current_channel,
1315 &rtlphy->sw_chnl_stage,
1316 &rtlphy->sw_chnl_step,
1317 &delay)) {
1318 if (delay > 0)
1319 mdelay(delay);
1320 else
1321 continue;
1322 } else {
1323 rtlphy->sw_chnl_inprogress = false;
1324 }
1325 break;
1326 } while (true);
1327 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
1328}
1329
1330u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
1331{
1332 struct rtl_priv *rtlpriv = rtl_priv(hw);
1333 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1334 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1335
1336 if (rtlphy->sw_chnl_inprogress)
1337 return 0;
1338 if (rtlphy->set_bwmode_inprogress)
1339 return 0;
1340 RT_ASSERT((rtlphy->current_channel <= 14),
1341 "WIRELESS_MODE_G but channel>14");
1342 rtlphy->sw_chnl_inprogress = true;
1343 rtlphy->sw_chnl_stage = 0;
1344 rtlphy->sw_chnl_step = 0;
1345 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1346 rtl8723be_phy_sw_chnl_callback(hw);
1347 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1348 "sw_chnl_inprogress false schdule "
1349 "workitem current channel %d\n",
1350 rtlphy->current_channel);
1351 rtlphy->sw_chnl_inprogress = false;
1352 } else {
1353 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1354 "sw_chnl_inprogress false driver sleep or"
1355 " unload\n");
1356 rtlphy->sw_chnl_inprogress = false;
1357 }
1358 return 1;
1359}
1360
1361static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
1362 u8 channel, u8 *stage,
1363 u8 *step, u32 *delay)
1364{
1365 struct rtl_priv *rtlpriv = rtl_priv(hw);
1366 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1367 struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
1368 u32 precommoncmdcnt;
1369 struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
1370 u32 postcommoncmdcnt;
1371 struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
1372 u32 rfdependcmdcnt;
1373 struct swchnlcmd *currentcmd = NULL;
1374 u8 rfpath;
1375 u8 num_total_rfpath = rtlphy->num_total_rfpath;
1376
1377 precommoncmdcnt = 0;
1378 rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1379 MAX_PRECMD_CNT,
1380 CMDID_SET_TXPOWEROWER_LEVEL,
1381 0, 0, 0);
1382 rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1383 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
1384 postcommoncmdcnt = 0;
1385 rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
1386 MAX_POSTCMD_CNT, CMDID_END,
1387 0, 0, 0);
1388 rfdependcmdcnt = 0;
1389
1390 RT_ASSERT((channel >= 1 && channel <= 14),
1391 "illegal channel for Zebra: %d\n", channel);
1392
1393 rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1394 MAX_RFDEPENDCMD_CNT,
1395 CMDID_RF_WRITEREG,
1396 RF_CHNLBW, channel, 10);
1397
1398 rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1399 MAX_RFDEPENDCMD_CNT,
1400 CMDID_END, 0, 0, 0);
1401
1402 do {
1403 switch (*stage) {
1404 case 0:
1405 currentcmd = &precommoncmd[*step];
1406 break;
1407 case 1:
1408 currentcmd = &rfdependcmd[*step];
1409 break;
1410 case 2:
1411 currentcmd = &postcommoncmd[*step];
1412 break;
1413 }
1414
1415 if (currentcmd->cmdid == CMDID_END) {
1416 if ((*stage) == 2) {
1417 return true;
1418 } else {
1419 (*stage)++;
1420 (*step) = 0;
1421 continue;
1422 }
1423 }
1424
1425 switch (currentcmd->cmdid) {
1426 case CMDID_SET_TXPOWEROWER_LEVEL:
1427 rtl8723be_phy_set_txpower_level(hw, channel);
1428 break;
1429 case CMDID_WRITEPORT_ULONG:
1430 rtl_write_dword(rtlpriv, currentcmd->para1,
1431 currentcmd->para2);
1432 break;
1433 case CMDID_WRITEPORT_USHORT:
1434 rtl_write_word(rtlpriv, currentcmd->para1,
1435 (u16) currentcmd->para2);
1436 break;
1437 case CMDID_WRITEPORT_UCHAR:
1438 rtl_write_byte(rtlpriv, currentcmd->para1,
1439 (u8) currentcmd->para2);
1440 break;
1441 case CMDID_RF_WRITEREG:
1442 for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
1443 rtlphy->rfreg_chnlval[rfpath] =
1444 ((rtlphy->rfreg_chnlval[rfpath] &
1445 0xfffffc00) | currentcmd->para2);
1446
1447 rtl_set_rfreg(hw, (enum radio_path)rfpath,
1448 currentcmd->para1,
1449 RFREG_OFFSET_MASK,
1450 rtlphy->rfreg_chnlval[rfpath]);
1451 }
1452 break;
1453 default:
1454 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1455 "switch case not process\n");
1456 break;
1457 }
1458
1459 break;
1460 } while (true);
1461
1462 (*delay) = currentcmd->msdelay;
1463 (*step)++;
1464 return false;
1465}
1466
/* Run one path-A TX IQ calibration attempt.  Programs the IQK tone and
 * trigger registers, waits, then checks the result registers.
 * Returns 0x01 when path-A TX IQK succeeded, 0x00 otherwise.
 * NOTE(review): config_pathb is unused here, and reg_ea4 is read but
 * never tested — presumably RX IQK checking was left out; confirm
 * against the vendor reference code.
 */
static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
	u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
	u8 result = 0x00;

	/* TX/RX IQK tone configuration for path A */
	rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c);
	rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c);
	rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a);
	rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000);

	/* Pulse the one-shot IQK trigger (0xf9... then 0xf8...) */
	rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
	reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);

	/* Success: no TX fail flag (0xeac bit 28) and the measured IQ
	 * results are not the known-bad default codes 0x142/0x42.
	 */
	if (!(reg_eac & BIT(28)) &&
	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	return result;
}
1494
/* Compare two IQK result rows (@c1 vs @c2) element-by-element.
 * Returns true when all compared elements agree within MAX_TOLERANCE.
 * Side effect: when a usable row is identified, its values are copied
 * into result[3], which the caller may fall back on.
 * A set bit i in simularity_bitmap marks a disagreeing element.
 */
static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8],
			       u8 c1, u8 c2)
{
	u32 i, j, diff, simularity_bitmap, bound;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	u8 final_candidate[2] = { 0xFF, 0xFF };
	bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);

	/* 2T2R compares all 8 elements (TX+RX of both paths), 1T only 4 */
	if (is2t)
		bound = 8;
	else
		bound = 4;

	simularity_bitmap = 0;

	for (i = 0; i < bound; i++) {
		diff = (result[c1][i] > result[c2][i]) ?
		       (result[c1][i] - result[c2][i]) :
		       (result[c2][i] - result[c1][i]);

		if (diff > MAX_TOLERANCE) {
			/* i == 2 / i == 6 are the first RX elements of each
			 * path: a (0,0) pair there means that row's RX IQK
			 * failed outright, so prefer the other row instead
			 * of flagging a mismatch.
			 */
			if ((i == 2 || i == 6) && !simularity_bitmap) {
				if (result[c1][i] + result[c1][i + 1] == 0)
					final_candidate[(i / 4)] = c2;
				else if (result[c2][i] + result[c2][i + 1] == 0)
					final_candidate[(i / 4)] = c1;
				else
					simularity_bitmap |= (1 << i);
			} else {
				simularity_bitmap |= (1 << i);
			}
		}
	}

	if (simularity_bitmap == 0) {
		/* Fully similar: adopt any per-path preferred row's TX
		 * values into the fallback row; returns false if a row
		 * was substituted, true otherwise.
		 */
		for (i = 0; i < (bound / 4); i++) {
			if (final_candidate[i] != 0xFF) {
				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
					result[3][j] =
					       result[final_candidate[i]][j];
				bresult = false;
			}
		}
		return bresult;
	} else if (!(simularity_bitmap & 0x0F)) {
		/* Path A (elements 0-3) agrees: keep c1's path-A values */
		for (i = 0; i < 4; i++)
			result[3][i] = result[c1][i];
		return false;
	} else if (!(simularity_bitmap & 0xF0) && is2t) {
		/* Path B (elements 4-7) agrees: keep c1's path-B values */
		for (i = 4; i < 8; i++)
			result[3][i] = result[c1][i];
		return false;
	} else {
		return false;
	}
}
1552
/* One IQ-calibration iteration (@t = 0, 1, 2).  On the first iteration
 * the ADDA/MAC/BB registers touched by IQK are saved; on later
 * iterations (t != 0) they are restored afterwards.  Path-A TX IQK is
 * retried up to 'retrycount' times and its results stored in
 * result[t][0..1].
 * NOTE(review): path_sel_bb/path_sel_rf and the 0xc50/0xc58 values are
 * saved unconditionally but only restored when t != 0 — so the t == 0
 * pass leaves them modified until a later pass; confirm this matches
 * the vendor flow.
 */
static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
					long result[][8], u8 t, bool is2t)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u32 i;
	u8 patha_ok;
	/* ADDA/AFE registers saved and reprogrammed around IQK */
	u32 adda_reg[IQK_ADDA_REG_NUM] = {
		0x85c, 0xe6c, 0xe70, 0xe74,
		0xe78, 0xe7c, 0xe80, 0xe84,
		0xe88, 0xe8c, 0xed0, 0xed4,
		0xed8, 0xedc, 0xee0, 0xeec
	};

	u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
		0x522, 0x550, 0x551, 0x040
	};
	u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
		ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR,
		RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c,
		0x870, 0x860,
		0x864, 0x800
	};
	const u32 retrycount = 2;
	u32 path_sel_bb, path_sel_rf;
	u8 tmp_reg_c50, tmp_reg_c58;

	/* Initial-gain bytes, restored on the t != 0 cleanup path */
	tmp_reg_c50 = rtl_get_bbreg(hw, 0xc50, MASKBYTE0);
	tmp_reg_c58 = rtl_get_bbreg(hw, 0xc58, MASKBYTE0);

	if (t == 0) {
		rtl8723_save_adda_registers(hw, adda_reg,
					    rtlphy->adda_backup, 16);
		rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
					       rtlphy->iqk_mac_backup);
		rtl8723_save_adda_registers(hw, iqk_bb_reg,
					    rtlphy->iqk_bb_backup,
					    IQK_BB_REG_NUM);
	}
	rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
	if (t == 0) {
		/* Remember whether PI mode was already on before we force it */
		rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
						RFPGA0_XA_HSSIPARAMETER1,
						BIT(8));
	}
	if (!rtlphy->rfpi_enable)
		rtl8723_phy_pi_mode_switch(hw, true);

	path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD);
	path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff);

	/*BB Setting*/
	rtl_set_bbreg(hw, 0x800, BIT(24), 0x00);
	rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
	rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
	rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);

	rtl_set_bbreg(hw, 0x870, BIT(10), 0x01);
	rtl_set_bbreg(hw, 0x870, BIT(26), 0x01);
	rtl_set_bbreg(hw, 0x860, BIT(10), 0x00);
	rtl_set_bbreg(hw, 0x864, BIT(10), 0x00);

	if (is2t)
		rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASKDWORD, 0x10000);
	rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
					    rtlphy->iqk_mac_backup);
	rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);

	/* Enter IQK mode and configure the calibration engine */
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
	rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
	rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800);
	for (i = 0; i < retrycount; i++) {
		patha_ok = _rtl8723be_phy_path_a_iqk(hw, is2t);
		if (patha_ok == 0x01) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
				 "Path A Tx IQK Success!!\n");
			/* Store the measured TX I/Q results (10-bit fields) */
			result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
					0x3FF0000) >> 16;
			result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
					0x3FF0000) >> 16;
			break;
		}
	}

	/* NOTE(review): message says "Success" on the failure path —
	 * looks like it should read "Fail"; confirm before changing.
	 */
	if (0 == patha_ok)
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "Path A IQK Success!!\n");
	if (is2t) {
		rtl8723_phy_path_a_standby(hw);
		rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
	}

	/* Leave IQK mode */
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);

	if (t != 0) {
		if (!rtlphy->rfpi_enable)
			rtl8723_phy_pi_mode_switch(hw, false);
		rtl8723_phy_reload_adda_registers(hw, adda_reg,
						  rtlphy->adda_backup, 16);
		rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
						 rtlphy->iqk_mac_backup);
		rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
						  rtlphy->iqk_bb_backup,
						  IQK_BB_REG_NUM);

		rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb);
		rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf);

		/* Kick then restore the initial-gain bytes */
		rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50);
		rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_reg_c50);
		if (is2t) {
			rtl_set_bbreg(hw, 0xc58, MASKBYTE0, 0x50);
			rtl_set_bbreg(hw, 0xc58, MASKBYTE0, tmp_reg_c58);
		}
		rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
		rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
	}
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
}
1672
/* Run LC (inductor-capacitor oscillator) calibration.  TX is quiesced
 * first — either by masking the continuous-TX bits of 0xd03 or, if no
 * continuous TX is active, by pausing all TX queues — the RF is put in
 * the required mode, the LC-cal bit sequence is issued, and the
 * original state is restored afterwards.
 */
static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmpreg;
	u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;

	tmpreg = rtl_read_byte(rtlpriv, 0xd03);

	/* Bits 0x70 set means continuous TX is active: mask it off;
	 * otherwise pause all TX queues via REG_TXPAUSE.
	 */
	if ((tmpreg & 0x70) != 0)
		rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
	else
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);

	if ((tmpreg & 0x70) != 0) {
		/* Save RF mode(s) and force them into the 0x10000 state
		 * required during calibration.
		 */
		rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);

		if (is2t)
			rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
						  MASK12BITS);

		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
			      (rf_a_mode & 0x8FFFF) | 0x10000);

		if (is2t)
			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
				      (rf_b_mode & 0x8FFFF) | 0x10000);
	}
	/* NOTE(review): lc_cal is read but never used afterwards —
	 * presumably a leftover from a variant that re-wrote 0x18.
	 */
	lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);

	/* Start LC calibration and give it 100 ms to complete */
	rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
	rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);

	mdelay(100);

	rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);

	/* Restore whichever TX-quiesce method was used above */
	if ((tmpreg & 0x70) != 0) {
		rtl_write_byte(rtlpriv, 0xd03, tmpreg);
		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);

		if (is2t)
			rtl_set_rfreg(hw, RF90_PATH_B, 0x00,
				      MASK12BITS, rf_b_mode);
	} else {
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
	}
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
1721
/* Select the active antenna: MAIN (@bmain) or AUX, for 2T (@is2t) or
 * 1T hardware.  When the HAL is stopped, LED/RF-parameter registers are
 * first set up so the switch takes effect.
 */
static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
					     bool bmain, bool is2t)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");

	if (is_hal_stop(rtlhal)) {
		u8 u1btmp;
		u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0);
		rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7));
		rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
	}
	if (is2t) {
		/* 2T: pick the antenna via path-B interface OE bits 5-6 */
		if (bmain)
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
				      BIT(5) | BIT(6), 0x1);
		else
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
				      BIT(5) | BIT(6), 0x2);
	} else {
		rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0);
		rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201);

		/* We use the RF definition of MAIN and AUX,
		 * left antenna and right antenna repectively.
		 * Default output at AUX.
		 */
		if (bmain) {
			rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
				      BIT(14) | BIT(13) | BIT(12), 0);
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
				      BIT(5) | BIT(4) | BIT(3), 0);
			if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
				rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 0);
		} else {
			rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
				      BIT(14) | BIT(13) | BIT(12), 1);
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
				      BIT(5) | BIT(4) | BIT(3), 1);
			if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
				rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 1);
		}
	}
}
1768
1769#undef IQK_ADDA_REG_NUM
1770#undef IQK_DELAY_TIME
1771
1772void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
1773{
1774 struct rtl_priv *rtlpriv = rtl_priv(hw);
1775 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1776 long result[4][8];
1777 u8 i, final_candidate;
1778 bool patha_ok, pathb_ok;
1779 long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
1780 reg_ecc, reg_tmp = 0;
1781 bool is12simular, is13simular, is23simular;
1782 u32 iqk_bb_reg[9] = {
1783 ROFDM0_XARXIQIMBALANCE,
1784 ROFDM0_XBRXIQIMBALANCE,
1785 ROFDM0_ECCATHRESHOLD,
1786 ROFDM0_AGCRSSITABLE,
1787 ROFDM0_XATXIQIMBALANCE,
1788 ROFDM0_XBTXIQIMBALANCE,
1789 ROFDM0_XCTXAFE,
1790 ROFDM0_XDTXAFE,
1791 ROFDM0_RXIQEXTANTA
1792 };
1793
1794 if (recovery) {
1795 rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
1796 rtlphy->iqk_bb_backup, 9);
1797 return;
1798 }
1799
1800 for (i = 0; i < 8; i++) {
1801 result[0][i] = 0;
1802 result[1][i] = 0;
1803 result[2][i] = 0;
1804 result[3][i] = 0;
1805 }
1806 final_candidate = 0xff;
1807 patha_ok = false;
1808 pathb_ok = false;
1809 is12simular = false;
1810 is23simular = false;
1811 is13simular = false;
1812 for (i = 0; i < 3; i++) {
1813 if (get_rf_type(rtlphy) == RF_2T2R)
1814 _rtl8723be_phy_iq_calibrate(hw, result, i, true);
1815 else
1816 _rtl8723be_phy_iq_calibrate(hw, result, i, false);
1817 if (i == 1) {
1818 is12simular = phy_similarity_cmp(hw, result, 0, 1);
1819 if (is12simular) {
1820 final_candidate = 0;
1821 break;
1822 }
1823 }
1824 if (i == 2) {
1825 is13simular = phy_similarity_cmp(hw, result, 0, 2);
1826 if (is13simular) {
1827 final_candidate = 0;
1828 break;
1829 }
1830 is23simular = phy_similarity_cmp(hw, result, 1, 2);
1831 if (is23simular) {
1832 final_candidate = 1;
1833 } else {
1834 for (i = 0; i < 8; i++)
1835 reg_tmp += result[3][i];
1836
1837 if (reg_tmp != 0)
1838 final_candidate = 3;
1839 else
1840 final_candidate = 0xFF;
1841 }
1842 }
1843 }
1844 for (i = 0; i < 4; i++) {
1845 reg_e94 = result[i][0];
1846 reg_e9c = result[i][1];
1847 reg_ea4 = result[i][2];
1848 reg_eac = result[i][3];
1849 reg_eb4 = result[i][4];
1850 reg_ebc = result[i][5];
1851 reg_ec4 = result[i][6];
1852 reg_ecc = result[i][7];
1853 }
1854 if (final_candidate != 0xff) {
1855 reg_e94 = result[final_candidate][0];
1856 rtlphy->reg_e94 = reg_e94;
1857 reg_e9c = result[final_candidate][1];
1858 rtlphy->reg_e9c = reg_e9c;
1859 reg_ea4 = result[final_candidate][2];
1860 reg_eac = result[final_candidate][3];
1861 reg_eb4 = result[final_candidate][4];
1862 rtlphy->reg_eb4 = reg_eb4;
1863 reg_ebc = result[final_candidate][5];
1864 rtlphy->reg_ebc = reg_ebc;
1865 reg_ec4 = result[final_candidate][6];
1866 reg_ecc = result[final_candidate][7];
1867 patha_ok = true;
1868 pathb_ok = true;
1869 } else {
1870 rtlphy->reg_e94 = 0x100;
1871 rtlphy->reg_eb4 = 0x100;
1872 rtlphy->reg_e9c = 0x0;
1873 rtlphy->reg_ebc = 0x0;
1874 }
1875 if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
1876 rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
1877 final_candidate,
1878 (reg_ea4 == 0));
1879 if (final_candidate != 0xFF) {
1880 for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
1881 rtlphy->iqk_matrix[0].value[0][i] =
1882 result[final_candidate][i];
1883 rtlphy->iqk_matrix[0].iqk_done = true;
1884 }
1885 rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9);
1886}
1887
1888void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw)
1889{
1890 struct rtl_priv *rtlpriv = rtl_priv(hw);
1891 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1892 struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
1893 u32 timeout = 2000, timecount = 0;
1894
1895 while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
1896 udelay(50);
1897 timecount += 50;
1898 }
1899
1900 rtlphy->lck_inprogress = true;
1901 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1902 "LCK:Start!!! currentband %x delay %d ms\n",
1903 rtlhal->current_bandtype, timecount);
1904
1905 _rtl8723be_phy_lc_calibrate(hw, false);
1906
1907 rtlphy->lck_inprogress = false;
1908}
1909
1910void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
1911{
1912 struct rtl_priv *rtlpriv = rtl_priv(hw);
1913 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1914
1915 if (rtlphy->apk_done)
1916 return;
1917
1918 return;
1919}
1920
/* Select the MAIN (@bmain) or AUX antenna; the 8723BE uses the 1T
 * (is2t = false) switching path.
 */
void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
	_rtl8723be_phy_set_rfpath_switch(hw, bmain, false);
}
1925
1926static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
1927{
1928 struct rtl_priv *rtlpriv = rtl_priv(hw);
1929 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1930
1931 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1932 "--->Cmd(%#x), set_io_inprogress(%d)\n",
1933 rtlphy->current_io_type, rtlphy->set_io_inprogress);
1934 switch (rtlphy->current_io_type) {
1935 case IO_CMD_RESUME_DM_BY_SCAN:
1936 rtlpriv->dm_digtable.cur_igvalue =
1937 rtlphy->initgain_backup.xaagccore1;
1938 /*rtl92c_dm_write_dig(hw);*/
1939 rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
1940 rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83);
1941 break;
1942 case IO_CMD_PAUSE_DM_BY_SCAN:
1943 rtlphy->initgain_backup.xaagccore1 =
1944 rtlpriv->dm_digtable.cur_igvalue;
1945 rtlpriv->dm_digtable.cur_igvalue = 0x17;
1946 rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
1947 break;
1948 default:
1949 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1950 "switch case not process\n");
1951 break;
1952 }
1953 rtlphy->set_io_inprogress = false;
1954 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1955 "(%#x)\n", rtlphy->current_io_type);
1956}
1957
1958bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
1959{
1960 struct rtl_priv *rtlpriv = rtl_priv(hw);
1961 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1962 bool postprocessing = false;
1963
1964 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1965 "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
1966 iotype, rtlphy->set_io_inprogress);
1967 do {
1968 switch (iotype) {
1969 case IO_CMD_RESUME_DM_BY_SCAN:
1970 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1971 "[IO CMD] Resume DM after scan.\n");
1972 postprocessing = true;
1973 break;
1974 case IO_CMD_PAUSE_DM_BY_SCAN:
1975 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1976 "[IO CMD] Pause DM before scan.\n");
1977 postprocessing = true;
1978 break;
1979 default:
1980 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1981 "switch case not process\n");
1982 break;
1983 }
1984 } while (false);
1985 if (postprocessing && !rtlphy->set_io_inprogress) {
1986 rtlphy->set_io_inprogress = true;
1987 rtlphy->current_io_type = iotype;
1988 } else {
1989 return false;
1990 }
1991 rtl8723be_phy_set_io(hw);
1992 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
1993 return true;
1994}
1995
1996static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw)
1997{
1998 struct rtl_priv *rtlpriv = rtl_priv(hw);
1999
2000 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
2001 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2002 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2003 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2004 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2005}
2006
2007static void _rtl8723be_phy_set_rf_sleep(struct ieee80211_hw *hw)
2008{
2009 struct rtl_priv *rtlpriv = rtl_priv(hw);
2010
2011 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
2012 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2013 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2014 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
2015}
2016
2017static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
2018 enum rf_pwrstate rfpwr_state)
2019{
2020 struct rtl_priv *rtlpriv = rtl_priv(hw);
2021 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2022 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2023 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2024 bool bresult = true;
2025 u8 i, queue_id;
2026 struct rtl8192_tx_ring *ring = NULL;
2027
2028 switch (rfpwr_state) {
2029 case ERFON:
2030 if ((ppsc->rfpwr_state == ERFOFF) &&
2031 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
2032 bool rtstatus;
2033 u32 initialize_count = 0;
2034 do {
2035 initialize_count++;
2036 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2037 "IPS Set eRf nic enable\n");
2038 rtstatus = rtl_ps_enable_nic(hw);
2039 } while (!rtstatus && (initialize_count < 10));
2040 RT_CLEAR_PS_LEVEL(ppsc,
2041 RT_RF_OFF_LEVL_HALT_NIC);
2042 } else {
2043 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2044 "Set ERFON sleeped:%d ms\n",
2045 jiffies_to_msecs(jiffies -
2046 ppsc->last_sleep_jiffies));
2047 ppsc->last_awake_jiffies = jiffies;
2048 rtl8723be_phy_set_rf_on(hw);
2049 }
2050 if (mac->link_state == MAC80211_LINKED)
2051 rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
2052 else
2053 rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
2054 break;
2055 case ERFOFF:
2056 for (queue_id = 0, i = 0;
2057 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2058 ring = &pcipriv->dev.tx_ring[queue_id];
2059 if (skb_queue_len(&ring->queue) == 0) {
2060 queue_id++;
2061 continue;
2062 } else {
2063 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2064 "eRf Off/Sleep: %d times "
2065 "TcbBusyQueue[%d] =%d before "
2066 "doze!\n", (i + 1), queue_id,
2067 skb_queue_len(&ring->queue));
2068
2069 udelay(10);
2070 i++;
2071 }
2072 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
2073 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2074 "\n ERFSLEEP: %d times "
2075 "TcbBusyQueue[%d] = %d !\n",
2076 MAX_DOZE_WAITING_TIMES_9x,
2077 queue_id,
2078 skb_queue_len(&ring->queue));
2079 break;
2080 }
2081 }
2082
2083 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
2084 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2085 "IPS Set eRf nic disable\n");
2086 rtl_ps_disable_nic(hw);
2087 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2088 } else {
2089 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
2090 rtlpriv->cfg->ops->led_control(hw,
2091 LED_CTL_NO_LINK);
2092 } else {
2093 rtlpriv->cfg->ops->led_control(hw,
2094 LED_CTL_POWER_OFF);
2095 }
2096 }
2097 break;
2098 case ERFSLEEP:
2099 if (ppsc->rfpwr_state == ERFOFF)
2100 break;
2101 for (queue_id = 0, i = 0;
2102 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2103 ring = &pcipriv->dev.tx_ring[queue_id];
2104 if (skb_queue_len(&ring->queue) == 0) {
2105 queue_id++;
2106 continue;
2107 } else {
2108 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2109 "eRf Off/Sleep: %d times "
2110 "TcbBusyQueue[%d] =%d before "
2111 "doze!\n", (i + 1), queue_id,
2112 skb_queue_len(&ring->queue));
2113
2114 udelay(10);
2115 i++;
2116 }
2117 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
2118 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2119 "\n ERFSLEEP: %d times "
2120 "TcbBusyQueue[%d] = %d !\n",
2121 MAX_DOZE_WAITING_TIMES_9x,
2122 queue_id,
2123 skb_queue_len(&ring->queue));
2124 break;
2125 }
2126 }
2127 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2128 "Set ERFSLEEP awaked:%d ms\n",
2129 jiffies_to_msecs(jiffies -
2130 ppsc->last_awake_jiffies));
2131 ppsc->last_sleep_jiffies = jiffies;
2132 _rtl8723be_phy_set_rf_sleep(hw);
2133 break;
2134 default:
2135 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2136 "switch case not process\n");
2137 bresult = false;
2138 break;
2139 }
2140 if (bresult)
2141 ppsc->rfpwr_state = rfpwr_state;
2142 return bresult;
2143}
2144
2145bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
2146 enum rf_pwrstate rfpwr_state)
2147{
2148 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2149
2150 bool bresult = false;
2151
2152 if (rfpwr_state == ppsc->rfpwr_state)
2153 return bresult;
2154 bresult = _rtl8723be_phy_set_rf_power_state(hw, rfpwr_state);
2155 return bresult;
2156}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
new file mode 100644
index 000000000000..444ef95bb6af
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
@@ -0,0 +1,217 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_PHY_H__
27#define __RTL8723BE_PHY_H__
28
29/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/
30#define MAX_TX_COUNT 4
31#define TX_1S 0
32#define TX_2S 1
33
34#define MAX_POWER_INDEX 0x3F
35
36#define MAX_PRECMD_CNT 16
37#define MAX_RFDEPENDCMD_CNT 16
38#define MAX_POSTCMD_CNT 16
39
40#define MAX_DOZE_WAITING_TIMES_9x 64
41
42#define RT_CANNOT_IO(hw) false
43#define HIGHPOWER_RADIOA_ARRAYLEN 22
44
45#define IQK_ADDA_REG_NUM 16
46#define IQK_BB_REG_NUM 9
47#define MAX_TOLERANCE 5
48#define IQK_DELAY_TIME 10
49#define index_mapping_NUM 15
50
51#define APK_BB_REG_NUM 5
52#define APK_AFE_REG_NUM 16
53#define APK_CURVE_REG_NUM 4
54#define PATH_NUM 1
55
56#define LOOP_LIMIT 5
57#define MAX_STALL_TIME 50
58#define ANTENNADIVERSITYVALUE 0x80
59#define MAX_TXPWR_IDX_NMODE_92S 63
60#define RESET_CNT_LIMIT 3
61
62#define IQK_ADDA_REG_NUM 16
63#define IQK_MAC_REG_NUM 4
64
65#define RF6052_MAX_PATH 2
66
67#define CT_OFFSET_MAC_ADDR 0X16
68
69#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
70#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
71#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
72#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
73#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
74
75#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
76#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
77
78#define CT_OFFSET_CHANNEL_PLAH 0x75
79#define CT_OFFSET_THERMAL_METER 0x78
80#define CT_OFFSET_RF_OPTION 0x79
81#define CT_OFFSET_VERSION 0x7E
82#define CT_OFFSET_CUSTOMER_ID 0x7F
83
84#define RTL92C_MAX_PATH_NUM 2
85
86enum hw90_block_e {
87 HW90_BLOCK_MAC = 0,
88 HW90_BLOCK_PHY0 = 1,
89 HW90_BLOCK_PHY1 = 2,
90 HW90_BLOCK_RF = 3,
91 HW90_BLOCK_MAXIMUM = 4,
92};
93
94enum baseband_config_type {
95 BASEBAND_CONFIG_PHY_REG = 0,
96 BASEBAND_CONFIG_AGC_TAB = 1,
97};
98
99enum ra_offset_area {
100 RA_OFFSET_LEGACY_OFDM1,
101 RA_OFFSET_LEGACY_OFDM2,
102 RA_OFFSET_HT_OFDM1,
103 RA_OFFSET_HT_OFDM2,
104 RA_OFFSET_HT_OFDM3,
105 RA_OFFSET_HT_OFDM4,
106 RA_OFFSET_HT_CCK,
107};
108
109enum antenna_path {
110 ANTENNA_NONE,
111 ANTENNA_D,
112 ANTENNA_C,
113 ANTENNA_CD,
114 ANTENNA_B,
115 ANTENNA_BD,
116 ANTENNA_BC,
117 ANTENNA_BCD,
118 ANTENNA_A,
119 ANTENNA_AD,
120 ANTENNA_AC,
121 ANTENNA_ACD,
122 ANTENNA_AB,
123 ANTENNA_ABD,
124 ANTENNA_ABC,
125 ANTENNA_ABCD
126};
127
128struct r_antenna_select_ofdm {
129 u32 r_tx_antenna:4;
130 u32 r_ant_l:4;
131 u32 r_ant_non_ht:4;
132 u32 r_ant_ht1:4;
133 u32 r_ant_ht2:4;
134 u32 r_ant_ht_s1:4;
135 u32 r_ant_non_ht_s1:4;
136 u32 ofdm_txsc:2;
137 u32 reserved:2;
138};
139
140struct r_antenna_select_cck {
141 u8 r_cckrx_enable_2:2;
142 u8 r_cckrx_enable:2;
143 u8 r_ccktx_enable:4;
144};
145
146
147struct efuse_contents {
148 u8 mac_addr[ETH_ALEN];
149 u8 cck_tx_power_idx[6];
150 u8 ht40_1s_tx_power_idx[6];
151 u8 ht40_2s_tx_power_idx_diff[3];
152 u8 ht20_tx_power_idx_diff[3];
153 u8 ofdm_tx_power_idx_diff[3];
154 u8 ht40_max_power_offset[3];
155 u8 ht20_max_power_offset[3];
156 u8 channel_plan;
157 u8 thermal_meter;
158 u8 rf_option[5];
159 u8 version;
160 u8 oem_id;
161 u8 regulatory;
162};
163
164struct tx_power_struct {
165 u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
166 u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
167 u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
168 u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
169 u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
170 u8 legacy_ht_txpowerdiff;
171 u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
172 u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
173 u8 pwrgroup_cnt;
174 u32 mcs_original_offset[4][16];
175};
176
177enum _ANT_DIV_TYPE {
178 NO_ANTDIV = 0xFF,
179 CG_TRX_HW_ANTDIV = 0x01,
180 CGCS_RX_HW_ANTDIV = 0x02,
181 FIXED_HW_ANTDIV = 0x03,
182 CG_TRX_SMART_ANTDIV = 0x04,
183 CGCS_RX_SW_ANTDIV = 0x05,
184};
185
186u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw,
187 enum radio_path rfpath,
188 u32 regaddr, u32 bitmask);
189void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw,
190 enum radio_path rfpath,
191 u32 regaddr, u32 bitmask, u32 data);
192bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
193bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
194bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
195void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
196void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
197 long *powerlevel);
198void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
199 u8 channel);
200void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
201 u8 operation);
202void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
203void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
204 enum nl80211_channel_type ch_type);
205void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw);
206u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw);
207void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
208 bool b_recovery);
209void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
210void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw);
211void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
212bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
213 enum radio_path rfpath);
214bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
215bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
216 enum rf_pwrstate rfpwr_state);
217#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
new file mode 100644
index 000000000000..b5167e73fecf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
@@ -0,0 +1,106 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "pwrseqcmd.h"
27#include "pwrseq.h"
28
29
30/* drivers should parse below arrays and do the corresponding actions */
31/*3 Power on Array*/
32struct wlan_pwr_cfg rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
33 RTL8723B_TRANS_END_STEPS] = {
34 RTL8723B_TRANS_CARDEMU_TO_ACT
35 RTL8723B_TRANS_END
36};
37
38/*3Radio off GPIO Array */
39struct wlan_pwr_cfg rtl8723B_radio_off_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS
40 + RTL8723B_TRANS_END_STEPS] = {
41 RTL8723B_TRANS_ACT_TO_CARDEMU
42 RTL8723B_TRANS_END
43};
44
45/*3Card Disable Array*/
46struct wlan_pwr_cfg rtl8723B_card_disable_flow
47 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
48 RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
49 RTL8723B_TRANS_END_STEPS] = {
50 RTL8723B_TRANS_ACT_TO_CARDEMU
51 RTL8723B_TRANS_CARDEMU_TO_CARDDIS
52 RTL8723B_TRANS_END
53};
54
55/*3 Card Enable Array*/
56struct wlan_pwr_cfg rtl8723B_card_enable_flow
57 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
58 RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
59 RTL8723B_TRANS_END_STEPS] = {
60 RTL8723B_TRANS_CARDDIS_TO_CARDEMU
61 RTL8723B_TRANS_CARDEMU_TO_ACT
62 RTL8723B_TRANS_END
63};
64
65/*3Suspend Array*/
66struct wlan_pwr_cfg rtl8723B_suspend_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
67 RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
68 RTL8723B_TRANS_END_STEPS] = {
69 RTL8723B_TRANS_ACT_TO_CARDEMU
70 RTL8723B_TRANS_CARDEMU_TO_SUS
71 RTL8723B_TRANS_END
72};
73
74/*3 Resume Array*/
75struct wlan_pwr_cfg rtl8723B_resume_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
76 RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
77 RTL8723B_TRANS_END_STEPS] = {
78 RTL8723B_TRANS_SUS_TO_CARDEMU
79 RTL8723B_TRANS_CARDEMU_TO_ACT
80 RTL8723B_TRANS_END
81};
82
83/*3HWPDN Array*/
84struct wlan_pwr_cfg rtl8723B_hwpdn_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
85 RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
86 RTL8723B_TRANS_END_STEPS] = {
87 RTL8723B_TRANS_ACT_TO_CARDEMU
88 RTL8723B_TRANS_CARDEMU_TO_PDN
89 RTL8723B_TRANS_END
90};
91
92/*3 Enter LPS */
93struct wlan_pwr_cfg rtl8723B_enter_lps_flow[RTL8723B_TRANS_ACT_TO_LPS_STEPS +
94 RTL8723B_TRANS_END_STEPS] = {
95 /*FW behavior*/
96 RTL8723B_TRANS_ACT_TO_LPS
97 RTL8723B_TRANS_END
98};
99
100/*3 Leave LPS */
101struct wlan_pwr_cfg rtl8723B_leave_lps_flow[RTL8723B_TRANS_LPS_TO_ACT_STEPS +
102 RTL8723B_TRANS_END_STEPS] = {
103 /*FW behavior*/
104 RTL8723B_TRANS_LPS_TO_ACT
105 RTL8723B_TRANS_END
106};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
new file mode 100644
index 000000000000..a62f43ed8d32
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
@@ -0,0 +1,304 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_PWRSEQ_H__
27#define __RTL8723BE_PWRSEQ_H__
28
29/* Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd
30 * There are 6 HW Power States:
31 * 0: POFF--Power Off
32 * 1: PDN--Power Down
33 * 2: CARDEMU--Card Emulation
34 * 3: ACT--Active Mode
35 * 4: LPS--Low Power State
36 * 5: SUS--Suspend
37 *
38 * The transition from different states are defined below
39 * TRANS_CARDEMU_TO_ACT
40 * TRANS_ACT_TO_CARDEMU
41 * TRANS_CARDEMU_TO_SUS
42 * TRANS_SUS_TO_CARDEMU
43 * TRANS_CARDEMU_TO_PDN
44 * TRANS_ACT_TO_LPS
45 * TRANS_LPS_TO_ACT
46 *
47 * TRANS_END
48 */
49#define RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS 23
50#define RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS 15
51#define RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS 15
52#define RTL8723B_TRANS_SUS_TO_CARDEMU_STEPS 15
53#define RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS 15
54#define RTL8723B_TRANS_PDN_TO_CARDEMU_STEPS 15
55#define RTL8723B_TRANS_ACT_TO_LPS_STEPS 15
56#define RTL8723B_TRANS_LPS_TO_ACT_STEPS 15
57#define RTL8723B_TRANS_END_STEPS 1
58
59#define RTL8723B_TRANS_CARDEMU_TO_ACT \
60 {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
61 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
62 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
63 {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
64 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
65 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
66 {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
67 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
68 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS}, \
69 {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
70 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
71 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, \
72 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
73 PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)|BIT(2)), 0}, \
74 {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
75 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0}, \
76 {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
77 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
78 {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
79 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)}, \
80 {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
81 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
82 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
83 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \
84 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
85 PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \
86 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
87 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
88 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
89 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \
90 {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
91 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, \
92 {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
93 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
94 {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
95 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
96 {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
97 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
98 {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
99 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
100 {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
101 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
102 {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
103 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3), BIT(3)}, \
104 {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
105 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)},
106
107#define RTL8723B_TRANS_ACT_TO_CARDEMU \
108 {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
109 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \
110 {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
111 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
112 {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
113 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
114 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
115 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
116 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
117 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \
118 {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
119 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), 0}, \
120 {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
121 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
122 PWR_CMD_WRITE, BIT(5), BIT(5)}, \
123 {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
124 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
125 PWR_CMD_WRITE, BIT(0), 0},
126
127#define RTL8723B_TRANS_CARDEMU_TO_SUS \
128 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
129 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), (BIT(4) | BIT(3))}, \
130 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
131 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
132 PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
133 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
134 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
135 {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
136 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \
137 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
138 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\
139 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
140 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
141 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
142 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
143
144#define RTL8723B_TRANS_SUS_TO_CARDEMU \
145 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
146 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \
147 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
148 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
149 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
150 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
151 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
152 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
153 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
154 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},
155
156#define RTL8723B_TRANS_CARDEMU_TO_CARDDIS \
157 {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
158 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \
159 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
160 PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
161 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \
162 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
163 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, \
164 {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
165 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1}, \
166 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
167 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
168 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
169 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
170 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
171 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
172
173#define RTL8723B_TRANS_CARDDIS_TO_CARDEMU \
174 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
175 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \
176 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
177 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
178 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
179 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
180 {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
181 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
182 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
183 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \
184 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
185 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
186 {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
187 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
188
189#define RTL8723B_TRANS_CARDEMU_TO_PDN \
190 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
191 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
192 {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
193 PWR_INTF_SDIO_MSK | PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, \
194 PWR_CMD_WRITE, 0xFF, 0x20}, \
195 {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
196 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
197 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
198 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},
199
200#define RTL8723B_TRANS_PDN_TO_CARDEMU \
201 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
202 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},
203
204#define RTL8723B_TRANS_ACT_TO_LPS \
205 {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
206 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
207 {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
208 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
209 {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
210 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
211 {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
212 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
213 {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
214 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
215 {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
216 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
217 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
218 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
219 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
220 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \
221 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
222 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
223 {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
224 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03}, \
225 {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
226 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
227 {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
228 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00}, \
229 {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
230 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},
231
232#define RTL8723B_TRANS_LPS_TO_ACT \
233 {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
234 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \
235 {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
236 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \
237 {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
238 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \
239 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
240 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \
241 {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
242 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
243 {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
244 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \
245 {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
246 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \
247 {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
248 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
249 {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
250 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
251 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
252 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \
253 {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
254 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
255
256#define RTL8723B_TRANS_END \
257 {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \
258 PWR_CMD_END, 0, 0},
259
260extern struct wlan_pwr_cfg rtl8723B_power_on_flow
261 [RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
262 RTL8723B_TRANS_END_STEPS];
263extern struct wlan_pwr_cfg rtl8723B_radio_off_flow
264 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
265 RTL8723B_TRANS_END_STEPS];
266extern struct wlan_pwr_cfg rtl8723B_card_disable_flow
267 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
268 RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
269 RTL8723B_TRANS_END_STEPS];
270extern struct wlan_pwr_cfg rtl8723B_card_enable_flow
271 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
272 RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
273 RTL8723B_TRANS_END_STEPS];
274extern struct wlan_pwr_cfg rtl8723B_suspend_flow
275 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
276 RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
277 RTL8723B_TRANS_END_STEPS];
278extern struct wlan_pwr_cfg rtl8723B_resume_flow
279 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
280 RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
281 RTL8723B_TRANS_END_STEPS];
282extern struct wlan_pwr_cfg rtl8723B_hwpdn_flow
283 [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
284 RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
285 RTL8723B_TRANS_END_STEPS];
286extern struct wlan_pwr_cfg rtl8723B_enter_lps_flow
287 [RTL8723B_TRANS_ACT_TO_LPS_STEPS +
288 RTL8723B_TRANS_END_STEPS];
289extern struct wlan_pwr_cfg rtl8723B_leave_lps_flow
290 [RTL8723B_TRANS_LPS_TO_ACT_STEPS +
291 RTL8723B_TRANS_END_STEPS];
292
293/* RTL8723 Power Configuration CMDs for PCIe interface */
294#define RTL8723_NIC_PWR_ON_FLOW rtl8723B_power_on_flow
295#define RTL8723_NIC_RF_OFF_FLOW rtl8723B_radio_off_flow
296#define RTL8723_NIC_DISABLE_FLOW rtl8723B_card_disable_flow
297#define RTL8723_NIC_ENABLE_FLOW rtl8723B_card_enable_flow
298#define RTL8723_NIC_SUSPEND_FLOW rtl8723B_suspend_flow
299#define RTL8723_NIC_RESUME_FLOW rtl8723B_resume_flow
300#define RTL8723_NIC_PDN_FLOW rtl8723B_hwpdn_flow
301#define RTL8723_NIC_LPS_ENTER_FLOW rtl8723B_enter_lps_flow
302#define RTL8723_NIC_LPS_LEAVE_FLOW rtl8723B_leave_lps_flow
303
304#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
new file mode 100644
index 000000000000..e4a507a756fb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
@@ -0,0 +1,140 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "pwrseq.h"
27
28/* Description:
29 * This routine deal with the Power Configuration CMDs
30 * parsing for RTL8723/RTL8188E Series IC.
31 * Assumption:
32 * We should follow specific format which was released from HW SD.
33 *
34 * 2011.07.07, added by Roger.
35 */
36bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
37 u8 fab_version, u8 interface_type,
38 struct wlan_pwr_cfg pwrcfgcmd[])
39
40{
41 struct wlan_pwr_cfg pwr_cfg_cmd = {0};
42 bool b_polling_bit = false;
43 u32 ary_idx = 0;
44 u8 value = 0;
45 u32 offset = 0;
46 u32 polling_count = 0;
47 u32 max_polling_cnt = 5000;
48
49 do {
50 pwr_cfg_cmd = pwrcfgcmd[ary_idx];
51 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
52 "rtlbe_hal_pwrseqcmdparsing(): "
53 "offset(%#x),cut_msk(%#x), fab_msk(%#x),"
54 "interface_msk(%#x), base(%#x), "
55 "cmd(%#x), msk(%#x), value(%#x)\n",
56 GET_PWR_CFG_OFFSET(pwr_cfg_cmd),
57 GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
58 GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd),
59 GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
60 GET_PWR_CFG_BASE(pwr_cfg_cmd),
61 GET_PWR_CFG_CMD(pwr_cfg_cmd),
62 GET_PWR_CFG_MASK(pwr_cfg_cmd),
63 GET_PWR_CFG_VALUE(pwr_cfg_cmd));
64
65 if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) &&
66 (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) &&
67 (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) {
68 switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
69 case PWR_CMD_READ:
70 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
71 "rtlbe_hal_pwrseqcmdparsing(): "
72 "PWR_CMD_READ\n");
73 break;
74 case PWR_CMD_WRITE:
75 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
76 "rtlbe_hal_pwrseqcmdparsing(): "
77 "PWR_CMD_WRITE\n");
78 offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
79
80 /*Read the value from system register*/
81 value = rtl_read_byte(rtlpriv, offset);
82 value &= (~(GET_PWR_CFG_MASK(pwr_cfg_cmd)));
83 value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
84 & GET_PWR_CFG_MASK(pwr_cfg_cmd));
85
86 /*Write the value back to sytem register*/
87 rtl_write_byte(rtlpriv, offset, value);
88 break;
89 case PWR_CMD_POLLING:
90 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
91 "rtlbe_hal_pwrseqcmdparsing(): "
92 "PWR_CMD_POLLING\n");
93 b_polling_bit = false;
94 offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
95
96 do {
97 value = rtl_read_byte(rtlpriv, offset);
98
99 value &= GET_PWR_CFG_MASK(pwr_cfg_cmd);
100 if (value ==
101 (GET_PWR_CFG_VALUE(pwr_cfg_cmd) &
102 GET_PWR_CFG_MASK(pwr_cfg_cmd)))
103 b_polling_bit = true;
104 else
105 udelay(10);
106
107 if (polling_count++ > max_polling_cnt)
108 return false;
109
110 } while (!b_polling_bit);
111 break;
112 case PWR_CMD_DELAY:
113 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
114 "rtlbe_hal_pwrseqcmdparsing(): "
115 "PWR_CMD_DELAY\n");
116 if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) ==
117 PWRSEQ_DELAY_US)
118 udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
119 else
120 mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
121 break;
122 case PWR_CMD_END:
123 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
124 "rtlbe_hal_pwrseqcmdparsing(): "
125 "PWR_CMD_END\n");
126 return true;
127 break;
128 default:
129 RT_ASSERT(false,
130 "rtlbe_hal_pwrseqcmdparsing(): "
131 "Unknown CMD!!\n");
132 break;
133 }
134 }
135
136 ary_idx++;
137 } while (1);
138
139 return true;
140}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
new file mode 100644
index 000000000000..ce14a3b5cb71
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
@@ -0,0 +1,95 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_PWRSEQCMD_H__
27#define __RTL8723BE_PWRSEQCMD_H__
28
29#include "../wifi.h"
30/*---------------------------------------------*/
31/*The value of cmd: 4 bits */
32/*---------------------------------------------*/
33#define PWR_CMD_READ 0x00
34#define PWR_CMD_WRITE 0x01
35#define PWR_CMD_POLLING 0x02
36#define PWR_CMD_DELAY 0x03
37#define PWR_CMD_END 0x04
38
39/* define the base address of each block */
40#define PWR_BASEADDR_MAC 0x00
41#define PWR_BASEADDR_USB 0x01
42#define PWR_BASEADDR_PCIE 0x02
43#define PWR_BASEADDR_SDIO 0x03
44
45#define PWR_INTF_SDIO_MSK BIT(0)
46#define PWR_INTF_USB_MSK BIT(1)
47#define PWR_INTF_PCI_MSK BIT(2)
48#define PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
49
50#define PWR_FAB_TSMC_MSK BIT(0)
51#define PWR_FAB_UMC_MSK BIT(1)
52#define PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
53
54#define PWR_CUT_TESTCHIP_MSK BIT(0)
55#define PWR_CUT_A_MSK BIT(1)
56#define PWR_CUT_B_MSK BIT(2)
57#define PWR_CUT_C_MSK BIT(3)
58#define PWR_CUT_D_MSK BIT(4)
59#define PWR_CUT_E_MSK BIT(5)
60#define PWR_CUT_F_MSK BIT(6)
61#define PWR_CUT_G_MSK BIT(7)
62#define PWR_CUT_ALL_MSK 0xFF
63
64
65enum pwrseq_delay_unit {
66 PWRSEQ_DELAY_US,
67 PWRSEQ_DELAY_MS,
68};
69
70struct wlan_pwr_cfg {
71 u16 offset;
72 u8 cut_msk;
73 u8 fab_msk:4;
74 u8 interface_msk:4;
75 u8 base:4;
76 u8 cmd:4;
77 u8 msk;
78 u8 value;
79
80};
81
82#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
83#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
84#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
85#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
86#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
87#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
88#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
89#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
90
91bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
92 u8 fab_version, u8 interface_type,
93 struct wlan_pwr_cfg pwrcfgcmd[]);
94
95#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
new file mode 100644
index 000000000000..4c653fab8795
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
@@ -0,0 +1,2277 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_REG_H__
27#define __RTL8723BE_REG_H__
28
29#define TXPKT_BUF_SELECT 0x69
30#define RXPKT_BUF_SELECT 0xA5
31#define DISABLE_TRXPKT_BUF_ACCESS 0x0
32
33#define REG_SYS_ISO_CTRL 0x0000
34#define REG_SYS_FUNC_EN 0x0002
35#define REG_APS_FSMCO 0x0004
36#define REG_SYS_CLKR 0x0008
37#define REG_9346CR 0x000A
38#define REG_EE_VPD 0x000C
39#define REG_AFE_MISC 0x0010
40#define REG_SPS0_CTRL 0x0011
41#define REG_SPS_OCP_CFG 0x0018
42#define REG_RSV_CTRL 0x001C
43#define REG_RF_CTRL 0x001F
44#define REG_LDOA15_CTRL 0x0020
45#define REG_LDOV12D_CTRL 0x0021
46#define REG_LDOHCI12_CTRL 0x0022
47#define REG_LPLDO_CTRL 0x0023
48#define REG_AFE_XTAL_CTRL 0x0024
49/* 1.5v for 8188EE test chip, 1.4v for MP chip */
50#define REG_AFE_LDO_CTRL 0x0027
51#define REG_AFE_PLL_CTRL 0x0028
52#define REG_MAC_PHY_CTRL 0x002c
53#define REG_EFUSE_CTRL 0x0030
54#define REG_EFUSE_TEST 0x0034
55#define REG_PWR_DATA 0x0038
56#define REG_CAL_TIMER 0x003C
57#define REG_ACLK_MON 0x003E
58#define REG_GPIO_MUXCFG 0x0040
59#define REG_GPIO_IO_SEL 0x0042
60#define REG_MAC_PINMUX_CFG 0x0043
61#define REG_GPIO_PIN_CTRL 0x0044
62#define REG_GPIO_INTM 0x0048
63#define REG_LEDCFG0 0x004C
64#define REG_LEDCFG1 0x004D
65#define REG_LEDCFG2 0x004E
66#define REG_LEDCFG3 0x004F
67#define REG_FSIMR 0x0050
68#define REG_FSISR 0x0054
69#define REG_HSIMR 0x0058
70#define REG_HSISR 0x005c
71#define REG_GPIO_PIN_CTRL_2 0x0060
72#define REG_GPIO_IO_SEL_2 0x0062
73#define REG_MULTI_FUNC_CTRL 0x0068
74#define REG_GPIO_OUTPUT 0x006c
75#define REG_AFE_XTAL_CTRL_EXT 0x0078
76#define REG_XCK_OUT_CTRL 0x007c
77#define REG_MCUFWDL 0x0080
78#define REG_WOL_EVENT 0x0081
79#define REG_MCUTSTCFG 0x0084
80
81
82#define REG_HIMR 0x00B0
83#define REG_HISR 0x00B4
84#define REG_HIMRE 0x00B8
85#define REG_HISRE 0x00BC
86
87#define REG_EFUSE_ACCESS 0x00CF
88
89#define REG_BIST_SCAN 0x00D0
90#define REG_BIST_RPT 0x00D4
91#define REG_BIST_ROM_RPT 0x00D8
92#define REG_USB_SIE_INTF 0x00E0
93#define REG_PCIE_MIO_INTF 0x00E4
94#define REG_PCIE_MIO_INTD 0x00E8
95#define REG_HPON_FSM 0x00EC
96#define REG_SYS_CFG 0x00F0
97#define REG_GPIO_OUTSTS 0x00F4
98#define REG_SYS_CFG1 0x00F0
99#define REG_ROM_VERSION 0x00FD
100
101#define REG_CR 0x0100
102#define REG_PBP 0x0104
103#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
104#define REG_TRXDMA_CTRL 0x010C
105#define REG_TRXFF_BNDY 0x0114
106#define REG_TRXFF_STATUS 0x0118
107#define REG_RXFF_PTR 0x011C
108
109#define REG_CPWM 0x012F
110#define REG_FWIMR 0x0130
111#define REG_FWISR 0x0134
112#define REG_PKTBUF_DBG_CTRL 0x0140
113#define REG_PKTBUF_DBG_DATA_L 0x0144
114#define REG_PKTBUF_DBG_DATA_H 0x0148
115#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL + 2)
116
117#define REG_TC0_CTRL 0x0150
118#define REG_TC1_CTRL 0x0154
119#define REG_TC2_CTRL 0x0158
120#define REG_TC3_CTRL 0x015C
121#define REG_TC4_CTRL 0x0160
122#define REG_TCUNIT_BASE 0x0164
123#define REG_MBIST_START 0x0174
124#define REG_MBIST_DONE 0x0178
125#define REG_MBIST_FAIL 0x017C
126#define REG_32K_CTRL 0x0194
127#define REG_C2HEVT_MSG_NORMAL 0x01A0
128#define REG_C2HEVT_CLEAR 0x01AF
129#define REG_C2HEVT_MSG_TEST 0x01B8
130#define REG_MCUTST_1 0x01c0
131#define REG_FMETHR 0x01C8
132#define REG_HMETFR 0x01CC
133#define REG_HMEBOX_0 0x01D0
134#define REG_HMEBOX_1 0x01D4
135#define REG_HMEBOX_2 0x01D8
136#define REG_HMEBOX_3 0x01DC
137
138#define REG_LLT_INIT 0x01E0
139#define REG_BB_ACCEESS_CTRL 0x01E8
140#define REG_BB_ACCESS_DATA 0x01EC
141
142#define REG_HMEBOX_EXT_0 0x01F0
143#define REG_HMEBOX_EXT_1 0x01F4
144#define REG_HMEBOX_EXT_2 0x01F8
145#define REG_HMEBOX_EXT_3 0x01FC
146
147#define REG_RQPN 0x0200
148#define REG_FIFOPAGE 0x0204
149#define REG_TDECTRL 0x0208
150#define REG_TXDMA_OFFSET_CHK 0x020C
151#define REG_TXDMA_STATUS 0x0210
152#define REG_RQPN_NPQ 0x0214
153
154#define REG_RXDMA_AGG_PG_TH 0x0280
155/* FW shall update this register before FW write RXPKT_RELEASE_POLL to 1 */
156#define REG_FW_UPD_RDPTR 0x0284
157/* Control the RX DMA.*/
158#define REG_RXDMA_CONTROL 0x0286
159/* The number of packets in RXPKTBUF. */
160#define REG_RXPKT_NUM 0x0287
161
162#define REG_PCIE_CTRL_REG 0x0300
163#define REG_INT_MIG 0x0304
164#define REG_BCNQ_DESA 0x0308
165#define REG_HQ_DESA 0x0310
166#define REG_MGQ_DESA 0x0318
167#define REG_VOQ_DESA 0x0320
168#define REG_VIQ_DESA 0x0328
169#define REG_BEQ_DESA 0x0330
170#define REG_BKQ_DESA 0x0338
171#define REG_RX_DESA 0x0340
172
173#define REG_DBI 0x0348
174#define REG_MDIO 0x0354
175#define REG_DBG_SEL 0x0360
176#define REG_PCIE_HRPWM 0x0361
177#define REG_PCIE_HCPWM 0x0363
178#define REG_UART_CTRL 0x0364
179#define REG_WATCH_DOG 0x0368
180#define REG_UART_TX_DESA 0x0370
181#define REG_UART_RX_DESA 0x0378
182
183
184#define REG_HDAQ_DESA_NODEF 0x0000
185#define REG_CMDQ_DESA_NODEF 0x0000
186
187#define REG_VOQ_INFORMATION 0x0400
188#define REG_VIQ_INFORMATION 0x0404
189#define REG_BEQ_INFORMATION 0x0408
190#define REG_BKQ_INFORMATION 0x040C
191#define REG_MGQ_INFORMATION 0x0410
192#define REG_HGQ_INFORMATION 0x0414
193#define REG_BCNQ_INFORMATION 0x0418
194#define REG_TXPKT_EMPTY 0x041A
195
196
197#define REG_CPU_MGQ_INFORMATION 0x041C
198#define REG_FWHW_TXQ_CTRL 0x0420
199#define REG_HWSEQ_CTRL 0x0423
200#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
201#define REG_TXPKTBUF_MGQ_BDNY 0x0425
202#define REG_MULTI_BCNQ_EN 0x0426
203#define REG_MULTI_BCNQ_OFFSET 0x0427
204#define REG_SPEC_SIFS 0x0428
205#define REG_RL 0x042A
206#define REG_DARFRC 0x0430
207#define REG_RARFRC 0x0438
208#define REG_RRSR 0x0440
209#define REG_ARFR0 0x0444
210#define REG_ARFR1 0x0448
211#define REG_ARFR2 0x044C
212#define REG_ARFR3 0x0450
213#define REG_AMPDU_MAX_TIME 0x0456
214#define REG_AGGLEN_LMT 0x0458
215#define REG_AMPDU_MIN_SPACE 0x045C
216#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
217#define REG_FAST_EDCA_CTRL 0x0460
218#define REG_RD_RESP_PKT_TH 0x0463
219#define REG_INIRTS_RATE_SEL 0x0480
220#define REG_INIDATA_RATE_SEL 0x0484
221#define REG_POWER_STATUS 0x04A4
222#define REG_POWER_STAGE1 0x04B4
223#define REG_POWER_STAGE2 0x04B8
224#define REG_PKT_LIFE_TIME 0x04C0
225#define REG_STBC_SETTING 0x04C4
226#define REG_PROT_MODE_CTRL 0x04C8
227#define REG_BAR_MODE_CTRL 0x04CC
228#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
229#define REG_EARLY_MODE_CONTROL 0x04D0
230#define REG_NQOS_SEQ 0x04DC
231#define REG_QOS_SEQ 0x04DE
232#define REG_NEED_CPU_HANDLE 0x04E0
233#define REG_PKT_LOSE_RPT 0x04E1
234#define REG_PTCL_ERR_STATUS 0x04E2
235#define REG_TX_RPT_CTRL 0x04EC
236#define REG_TX_RPT_TIME 0x04F0
237#define REG_DUMMY 0x04FC
238
239#define REG_EDCA_VO_PARAM 0x0500
240#define REG_EDCA_VI_PARAM 0x0504
241#define REG_EDCA_BE_PARAM 0x0508
242#define REG_EDCA_BK_PARAM 0x050C
243#define REG_BCNTCFG 0x0510
244#define REG_PIFS 0x0512
245#define REG_RDG_PIFS 0x0513
246#define REG_SIFS_CTX 0x0514
247#define REG_SIFS_TRX 0x0516
248#define REG_AGGR_BREAK_TIME 0x051A
249#define REG_SLOT 0x051B
250#define REG_TX_PTCL_CTRL 0x0520
251#define REG_TXPAUSE 0x0522
252#define REG_DIS_TXREQ_CLR 0x0523
253#define REG_RD_CTRL 0x0524
254#define REG_TBTT_PROHIBIT 0x0540
255#define REG_RD_NAV_NXT 0x0544
256#define REG_NAV_PROT_LEN 0x0546
257#define REG_BCN_CTRL 0x0550
258#define REG_USTIME_TSF 0x0551
259#define REG_MBID_NUM 0x0552
260#define REG_DUAL_TSF_RST 0x0553
261#define REG_BCN_INTERVAL 0x0554
262#define REG_MBSSID_BCN_SPACE 0x0554
263#define REG_DRVERLYINT 0x0558
264#define REG_BCNDMATIM 0x0559
265#define REG_ATIMWND 0x055A
266#define REG_BCN_MAX_ERR 0x055D
267#define REG_RXTSF_OFFSET_CCK 0x055E
268#define REG_RXTSF_OFFSET_OFDM 0x055F
269#define REG_TSFTR 0x0560
270#define REG_INIT_TSFTR 0x0564
271#define REG_SECONDARY_CCA_CTRL 0x0577
272#define REG_PSTIMER 0x0580
273#define REG_TIMER0 0x0584
274#define REG_TIMER1 0x0588
275#define REG_ACMHWCTRL 0x05C0
276#define REG_ACMRSTCTRL 0x05C1
277#define REG_ACMAVG 0x05C2
278#define REG_VO_ADMTIME 0x05C4
279#define REG_VI_ADMTIME 0x05C6
280#define REG_BE_ADMTIME 0x05C8
281#define REG_EDCA_RANDOM_GEN 0x05CC
282#define REG_SCH_TXCMD 0x05D0
283
284#define REG_APSD_CTRL 0x0600
285#define REG_BWOPMODE 0x0603
286#define REG_TCR 0x0604
287#define REG_RCR 0x0608
288#define REG_RX_PKT_LIMIT 0x060C
289#define REG_RX_DLK_TIME 0x060D
290#define REG_RX_DRVINFO_SZ 0x060F
291
292#define REG_MACID 0x0610
293#define REG_BSSID 0x0618
294#define REG_MAR 0x0620
295#define REG_MBIDCAMCFG 0x0628
296
297#define REG_USTIME_EDCA 0x0638
298#define REG_MAC_SPEC_SIFS 0x063A
299#define REG_RESP_SIFS_CCK 0x063C
300#define REG_RESP_SIFS_OFDM 0x063E
301#define REG_ACKTO 0x0640
302#define REG_CTS2TO 0x0641
303#define REG_EIFS 0x0642
304
305#define REG_NAV_CTRL 0x0650
306#define REG_BACAMCMD 0x0654
307#define REG_BACAMCONTENT 0x0658
308#define REG_LBDLY 0x0660
309#define REG_FWDLY 0x0661
310#define REG_RXERR_RPT 0x0664
311#define REG_TRXPTCL_CTL 0x0668
312
313#define REG_CAMCMD 0x0670
314#define REG_CAMWRITE 0x0674
315#define REG_CAMREAD 0x0678
316#define REG_CAMDBG 0x067C
317#define REG_SECCFG 0x0680
318
319#define REG_WOW_CTRL 0x0690
320#define REG_PSSTATUS 0x0691
321#define REG_PS_RX_INFO 0x0692
322#define REG_UAPSD_TID 0x0693
323#define REG_LPNAV_CTRL 0x0694
324#define REG_WKFMCAM_NUM 0x0698
325#define REG_WKFMCAM_RWD 0x069C
326#define REG_RXFLTMAP0 0x06A0
327#define REG_RXFLTMAP1 0x06A2
328#define REG_RXFLTMAP2 0x06A4
329#define REG_BCN_PSR_RPT 0x06A8
330#define REG_CALB32K_CTRL 0x06AC
331#define REG_PKT_MON_CTRL 0x06B4
332#define REG_BT_COEX_TABLE 0x06C0
333#define REG_WMAC_RESP_TXINFO 0x06D8
334
335#define REG_USB_INFO 0xFE17
336#define REG_USB_SPECIAL_OPTION 0xFE55
337#define REG_USB_DMA_AGG_TO 0xFE5B
338#define REG_USB_AGG_TO 0xFE5C
339#define REG_USB_AGG_TH 0xFE5D
340
341#define REG_TEST_USB_TXQS 0xFE48
342#define REG_TEST_SIE_VID 0xFE60
343#define REG_TEST_SIE_PID 0xFE62
344#define REG_TEST_SIE_OPTIONAL 0xFE64
345#define REG_TEST_SIE_CHIRP_K 0xFE65
346#define REG_TEST_SIE_PHY 0xFE66
347#define REG_TEST_SIE_MAC_ADDR 0xFE70
348#define REG_TEST_SIE_STRING 0xFE80
349
350#define REG_NORMAL_SIE_VID 0xFE60
351#define REG_NORMAL_SIE_PID 0xFE62
352#define REG_NORMAL_SIE_OPTIONAL 0xFE64
353#define REG_NORMAL_SIE_EP 0xFE65
354#define REG_NORMAL_SIE_PHY 0xFE68
355#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
356#define REG_NORMAL_SIE_STRING 0xFE80
357
358#define CR9346 REG_9346CR
359#define MSR (REG_CR + 2)
360#define ISR REG_HISR
361#define TSFR REG_TSFTR
362
363#define MACIDR0 REG_MACID
364#define MACIDR4 (REG_MACID + 4)
365
366#define PBP REG_PBP
367
368#define IDR0 MACIDR0
369#define IDR4 MACIDR4
370
371#define UNUSED_REGISTER 0x1BF
372#define DCAM UNUSED_REGISTER
373#define PSR UNUSED_REGISTER
374#define BBADDR UNUSED_REGISTER
375#define PHYDATAR UNUSED_REGISTER
376
377#define INVALID_BBRF_VALUE 0x12345678
378
379#define MAX_MSS_DENSITY_2T 0x13
380#define MAX_MSS_DENSITY_1T 0x0A
381
382#define CMDEEPROM_EN BIT(5)
383#define CMDEEPROM_SEL BIT(4)
384#define CMD9346CR_9356SEL BIT(4)
385#define AUTOLOAD_EEPROM (CMDEEPROM_EN | CMDEEPROM_SEL)
386#define AUTOLOAD_EFUSE CMDEEPROM_EN
387
388#define GPIOSEL_GPIO 0
389#define GPIOSEL_ENBT BIT(5)
390
391#define GPIO_IN REG_GPIO_PIN_CTRL
392#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1)
393#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2)
394#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3)
395
396/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
397#define HSIMR_GPIO12_0_INT_EN BIT(0)
398#define HSIMR_SPS_OCP_INT_EN BIT(5)
399#define HSIMR_RON_INT_EN BIT(6)
400#define HSIMR_PDN_INT_EN BIT(7)
401#define HSIMR_GPIO9_INT_EN BIT(25)
402
403/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
404
405#define HSISR_GPIO12_0_INT BIT(0)
406#define HSISR_SPS_OCP_INT BIT(5)
407#define HSISR_RON_INT_EN BIT(6)
408#define HSISR_PDNINT BIT(7)
409#define HSISR_GPIO9_INT BIT(25)
410
411#define MSR_NOLINK 0x00
412#define MSR_ADHOC 0x01
413#define MSR_INFRA 0x02
414#define MSR_AP 0x03
415
416#define RRSR_RSC_OFFSET 21
417#define RRSR_SHORT_OFFSET 23
418#define RRSR_RSC_BW_40M 0x600000
419#define RRSR_RSC_UPSUBCHNL 0x400000
420#define RRSR_RSC_LOWSUBCHNL 0x200000
421#define RRSR_SHORT 0x800000
422#define RRSR_1M BIT(0)
423#define RRSR_2M BIT(1)
424#define RRSR_5_5M BIT(2)
425#define RRSR_11M BIT(3)
426#define RRSR_6M BIT(4)
427#define RRSR_9M BIT(5)
428#define RRSR_12M BIT(6)
429#define RRSR_18M BIT(7)
430#define RRSR_24M BIT(8)
431#define RRSR_36M BIT(9)
432#define RRSR_48M BIT(10)
433#define RRSR_54M BIT(11)
434#define RRSR_MCS0 BIT(12)
435#define RRSR_MCS1 BIT(13)
436#define RRSR_MCS2 BIT(14)
437#define RRSR_MCS3 BIT(15)
438#define RRSR_MCS4 BIT(16)
439#define RRSR_MCS5 BIT(17)
440#define RRSR_MCS6 BIT(18)
441#define RRSR_MCS7 BIT(19)
442#define BRSR_ACKSHORTPMB BIT(23)
443
444#define RATR_1M 0x00000001
445#define RATR_2M 0x00000002
446#define RATR_55M 0x00000004
447#define RATR_11M 0x00000008
448#define RATR_6M 0x00000010
449#define RATR_9M 0x00000020
450#define RATR_12M 0x00000040
451#define RATR_18M 0x00000080
452#define RATR_24M 0x00000100
453#define RATR_36M 0x00000200
454#define RATR_48M 0x00000400
455#define RATR_54M 0x00000800
456#define RATR_MCS0 0x00001000
457#define RATR_MCS1 0x00002000
458#define RATR_MCS2 0x00004000
459#define RATR_MCS3 0x00008000
460#define RATR_MCS4 0x00010000
461#define RATR_MCS5 0x00020000
462#define RATR_MCS6 0x00040000
463#define RATR_MCS7 0x00080000
464#define RATR_MCS8 0x00100000
465#define RATR_MCS9 0x00200000
466#define RATR_MCS10 0x00400000
467#define RATR_MCS11 0x00800000
468#define RATR_MCS12 0x01000000
469#define RATR_MCS13 0x02000000
470#define RATR_MCS14 0x04000000
471#define RATR_MCS15 0x08000000
472
473#define RATE_1M BIT(0)
474#define RATE_2M BIT(1)
475#define RATE_5_5M BIT(2)
476#define RATE_11M BIT(3)
477#define RATE_6M BIT(4)
478#define RATE_9M BIT(5)
479#define RATE_12M BIT(6)
480#define RATE_18M BIT(7)
481#define RATE_24M BIT(8)
482#define RATE_36M BIT(9)
483#define RATE_48M BIT(10)
484#define RATE_54M BIT(11)
485#define RATE_MCS0 BIT(12)
486#define RATE_MCS1 BIT(13)
487#define RATE_MCS2 BIT(14)
488#define RATE_MCS3 BIT(15)
489#define RATE_MCS4 BIT(16)
490#define RATE_MCS5 BIT(17)
491#define RATE_MCS6 BIT(18)
492#define RATE_MCS7 BIT(19)
493#define RATE_MCS8 BIT(20)
494#define RATE_MCS9 BIT(21)
495#define RATE_MCS10 BIT(22)
496#define RATE_MCS11 BIT(23)
497#define RATE_MCS12 BIT(24)
498#define RATE_MCS13 BIT(25)
499#define RATE_MCS14 BIT(26)
500#define RATE_MCS15 BIT(27)
501
502#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
503#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
504 RATR_24M | RATR_36M | RATR_48M | RATR_54M)
505#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
506 RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
507 RATR_MCS6 | RATR_MCS7)
508#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
509 RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\
510 RATR_MCS14 | RATR_MCS15)
511
512#define BW_OPMODE_20MHZ BIT(2)
513#define BW_OPMODE_5G BIT(1)
514#define BW_OPMODE_11J BIT(0)
515
516#define CAM_VALID BIT(15)
517#define CAM_NOTVALID 0x0000
518#define CAM_USEDK BIT(5)
519
520#define CAM_NONE 0x0
521#define CAM_WEP40 0x01
522#define CAM_TKIP 0x02
523#define CAM_AES 0x04
524#define CAM_WEP104 0x05
525
526#define TOTAL_CAM_ENTRY 32
527#define HALF_CAM_ENTRY 16
528
529#define CAM_WRITE BIT(16)
530#define CAM_READ 0x00000000
531#define CAM_POLLINIG BIT(31)
532
533#define SCR_USEDK 0x01
534#define SCR_TXSEC_ENABLE 0x02
535#define SCR_RXSEC_ENABLE 0x04
536
537#define WOW_PMEN BIT(0)
538#define WOW_WOMEN BIT(1)
539#define WOW_MAGIC BIT(2)
540#define WOW_UWF BIT(3)
541
542/*********************************************
543* 8723BE IMR/ISR bits
544**********************************************/
545#define IMR_DISABLED 0x0
546/* IMR DW0(0x0060-0063) Bit 0-31 */
547#define IMR_TXCCK BIT(30) /* TXRPT interrupt when
548 * CCX bit of the packet is set
549 */
550#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */
551#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires,
552 * this bit is set to 1
553 */
554#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires,
555 * this bit is set to 1
556 */
557#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */
558#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */
559#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle
560 * indication interrupt
561 */
562#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
563#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */
564#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is
565 * true, this bit is set to 1)
566 */
567#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt
568 * Extension for Win7
569 */
570#define IMR_ATIMEND BIT(12) /* CTWidnow End or ATIM Window End */
571#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is
572 * true, this bit is set to 1)
573 */
574#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status,
575 * Write 1 clear
576 */
577#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status,
578 * Write 1 clear
579 */
580#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status,
581 * Write 1 clear
582 */
583#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */
584#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */
585#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */
586#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */
587#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */
588#define IMR_VODOK BIT(2) /* AC_VO DMA OK */
589#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */
590#define IMR_ROK BIT(0) /* Receive DMA OK */
591
592/* IMR DW1(0x00B4-00B7) Bit 0-31 */
593#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
594#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
595#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
596#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
597#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
598#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
599#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
600#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrup 7 */
601#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrup 6 */
602#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrup 5 */
603#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrup 4 */
604#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrup 3 */
605#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrup 2 */
606#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrup 1 */
607#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
608#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status,
609 * write 1 clear.
610 */
611#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status,
612 * Write 1 clear
613 */
614#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */
615#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */
616
617#define HWSET_MAX_SIZE 512
618#define EFUSE_MAX_SECTION 64
619#define EFUSE_REAL_CONTENT_LEN 256
620#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data exclude header,
621 * dummy 7 bytes frome CP test
622 * and reserved 1byte.
623 */
624
625#define EEPROM_DEFAULT_TSSI 0x0
626#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
627#define EEPROM_DEFAULT_CRYSTALCAP 0x5
628#define EEPROM_DEFAULT_BOARDTYPE 0x02
629#define EEPROM_DEFAULT_TXPOWER 0x1010
630#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
631
632#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
633#define EEPROM_DEFAULT_THERMALMETER 0x18
634#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
635#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
636#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
637#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
638#define EEPROM_DEFAULT_HT20_DIFF 2
639#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
640#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
641#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
642
643#define RF_OPTION1 0x79
644#define RF_OPTION2 0x7A
645#define RF_OPTION3 0x7B
646#define RF_OPTION4 0xC3
647
648#define EEPROM_DEFAULT_PID 0x1234
649#define EEPROM_DEFAULT_VID 0x5678
650#define EEPROM_DEFAULT_CUSTOMERID 0xAB
651#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
652#define EEPROM_DEFAULT_VERSION 0
653
654#define EEPROM_CHANNEL_PLAN_FCC 0x0
655#define EEPROM_CHANNEL_PLAN_IC 0x1
656#define EEPROM_CHANNEL_PLAN_ETSI 0x2
657#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
658#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
659#define EEPROM_CHANNEL_PLAN_MKK 0x5
660#define EEPROM_CHANNEL_PLAN_MKK1 0x6
661#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
662#define EEPROM_CHANNEL_PLAN_TELEC 0x8
663#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
664#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
665#define EEPROM_CHANNEL_PLAN_NCC 0xB
666#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
667
668#define EEPROM_CID_DEFAULT 0x0
669#define EEPROM_CID_TOSHIBA 0x4
670#define EEPROM_CID_CCX 0x10
671#define EEPROM_CID_QMI 0x0D
672#define EEPROM_CID_WHQL 0xFE
673
674#define RTL8723BE_EEPROM_ID 0x8129
675
676#define EEPROM_HPON 0x02
677#define EEPROM_CLK 0x06
678#define EEPROM_TESTR 0x08
679
680
681#define EEPROM_TXPOWERCCK 0x10
682#define EEPROM_TXPOWERHT40_1S 0x16
683#define EEPROM_TXPOWERHT20DIFF 0x1B
684#define EEPROM_TXPOWER_OFDMDIFF 0x1B
685
686
687
688#define EEPROM_TX_PWR_INX 0x10
689
690#define EEPROM_CHANNELPLAN 0xB8
691#define EEPROM_XTAL_8723BE 0xB9
692#define EEPROM_THERMAL_METER_88E 0xBA
693#define EEPROM_IQK_LCK_88E 0xBB
694
695#define EEPROM_RF_BOARD_OPTION_88E 0xC1
696#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
697#define EEPROM_RF_BT_SETTING_88E 0xC3
698#define EEPROM_VERSION 0xC4
699#define EEPROM_CUSTOMER_ID 0xC5
700#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
701
702#define EEPROM_MAC_ADDR 0xD0
703#define EEPROM_VID 0xD6
704#define EEPROM_DID 0xD8
705#define EEPROM_SVID 0xDA
706#define EEPROM_SMID 0xDC
707
708#define STOPBECON BIT(6)
709#define STOPHIGHT BIT(5)
710#define STOPMGT BIT(4)
711#define STOPVO BIT(3)
712#define STOPVI BIT(2)
713#define STOPBE BIT(1)
714#define STOPBK BIT(0)
715
716#define RCR_APPFCS BIT(31)
717#define RCR_APP_MIC BIT(30)
718#define RCR_APP_ICV BIT(29)
719#define RCR_APP_PHYST_RXFF BIT(28)
720#define RCR_APP_BA_SSN BIT(27)
721#define RCR_ENMBID BIT(24)
722#define RCR_LSIGEN BIT(23)
723#define RCR_MFBEN BIT(22)
724#define RCR_HTC_LOC_CTRL BIT(14)
725#define RCR_AMF BIT(13)
726#define RCR_ACF BIT(12)
727#define RCR_ADF BIT(11)
728#define RCR_AICV BIT(9)
729#define RCR_ACRC32 BIT(8)
730#define RCR_CBSSID_BCN BIT(7)
731#define RCR_CBSSID_DATA BIT(6)
732#define RCR_CBSSID RCR_CBSSID_DATA
733#define RCR_APWRMGT BIT(5)
734#define RCR_ADD3 BIT(4)
735#define RCR_AB BIT(3)
736#define RCR_AM BIT(2)
737#define RCR_APM BIT(1)
738#define RCR_AAP BIT(0)
739#define RCR_MXDMA_OFFSET 8
740#define RCR_FIFO_OFFSET 13
741
742#define RSV_CTRL 0x001C
743#define RD_CTRL 0x0524
744
745#define REG_USB_INFO 0xFE17
746#define REG_USB_SPECIAL_OPTION 0xFE55
747#define REG_USB_DMA_AGG_TO 0xFE5B
748#define REG_USB_AGG_TO 0xFE5C
749#define REG_USB_AGG_TH 0xFE5D
750
751#define REG_USB_VID 0xFE60
752#define REG_USB_PID 0xFE62
753#define REG_USB_OPTIONAL 0xFE64
754#define REG_USB_CHIRP_K 0xFE65
755#define REG_USB_PHY 0xFE66
756#define REG_USB_MAC_ADDR 0xFE70
757#define REG_USB_HRPWM 0xFE58
758#define REG_USB_HCPWM 0xFE57
759
760#define SW18_FPWM BIT(3)
761
762#define ISO_MD2PP BIT(0)
763#define ISO_UA2USB BIT(1)
764#define ISO_UD2CORE BIT(2)
765#define ISO_PA2PCIE BIT(3)
766#define ISO_PD2CORE BIT(4)
767#define ISO_IP2MAC BIT(5)
768#define ISO_DIOP BIT(6)
769#define ISO_DIOE BIT(7)
770#define ISO_EB2CORE BIT(8)
771#define ISO_DIOR BIT(9)
772
773#define PWC_EV25V BIT(14)
774#define PWC_EV12V BIT(15)
775
776#define FEN_BBRSTB BIT(0)
777#define FEN_BB_GLB_RSTN BIT(1)
778#define FEN_USBA BIT(2)
779#define FEN_UPLL BIT(3)
780#define FEN_USBD BIT(4)
781#define FEN_DIO_PCIE BIT(5)
782#define FEN_PCIEA BIT(6)
783#define FEN_PPLL BIT(7)
784#define FEN_PCIED BIT(8)
785#define FEN_DIOE BIT(9)
786#define FEN_CPUEN BIT(10)
787#define FEN_DCORE BIT(11)
788#define FEN_ELDR BIT(12)
789#define FEN_DIO_RF BIT(13)
790#define FEN_HWPDN BIT(14)
791#define FEN_MREGEN BIT(15)
792
793#define PFM_LDALL BIT(0)
794#define PFM_ALDN BIT(1)
795#define PFM_LDKP BIT(2)
796#define PFM_WOWL BIT(3)
797#define ENPDN BIT(4)
798#define PDN_PL BIT(5)
799#define APFM_ONMAC BIT(8)
800#define APFM_OFF BIT(9)
801#define APFM_RSM BIT(10)
802#define AFSM_HSUS BIT(11)
803#define AFSM_PCIE BIT(12)
804#define APDM_MAC BIT(13)
805#define APDM_HOST BIT(14)
806#define APDM_HPDN BIT(15)
807#define RDY_MACON BIT(16)
808#define SUS_HOST BIT(17)
809#define ROP_ALD BIT(20)
810#define ROP_PWR BIT(21)
811#define ROP_SPS BIT(22)
812#define SOP_MRST BIT(25)
813#define SOP_FUSE BIT(26)
814#define SOP_ABG BIT(27)
815#define SOP_AMB BIT(28)
816#define SOP_RCK BIT(29)
817#define SOP_A8M BIT(30)
818#define XOP_BTCK BIT(31)
819
820#define ANAD16V_EN BIT(0)
821#define ANA8M BIT(1)
822#define MACSLP BIT(4)
823#define LOADER_CLK_EN BIT(5)
824#define _80M_SSC_DIS BIT(7)
825#define _80M_SSC_EN_HO BIT(8)
826#define PHY_SSC_RSTB BIT(9)
827#define SEC_CLK_EN BIT(10)
828#define MAC_CLK_EN BIT(11)
829#define SYS_CLK_EN BIT(12)
830#define RING_CLK_EN BIT(13)
831
832#define BOOT_FROM_EEPROM BIT(4)
833#define EEPROM_EN BIT(5)
834
835#define AFE_BGEN BIT(0)
836#define AFE_MBEN BIT(1)
837#define MAC_ID_EN BIT(7)
838
839#define WLOCK_ALL BIT(0)
840#define WLOCK_00 BIT(1)
841#define WLOCK_04 BIT(2)
842#define WLOCK_08 BIT(3)
843#define WLOCK_40 BIT(4)
844#define R_DIS_PRST_0 BIT(5)
845#define R_DIS_PRST_1 BIT(6)
846#define LOCK_ALL_EN BIT(7)
847
848#define RF_EN BIT(0)
849#define RF_RSTB BIT(1)
850#define RF_SDMRSTB BIT(2)
851
852#define LDA15_EN BIT(0)
853#define LDA15_STBY BIT(1)
854#define LDA15_OBUF BIT(2)
855#define LDA15_REG_VOS BIT(3)
856#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
857
858#define LDV12_EN BIT(0)
859#define LDV12_SDBY BIT(1)
860#define LPLDO_HSM BIT(2)
861#define LPLDO_LSM_DIS BIT(3)
862#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
863
864#define XTAL_EN BIT(0)
865#define XTAL_BSEL BIT(1)
866#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
867#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
868#define XTAL_GATE_USB BIT(8)
869#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
870#define XTAL_GATE_AFE BIT(11)
871#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
872#define XTAL_RF_GATE BIT(14)
873#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
874#define XTAL_GATE_DIG BIT(17)
875#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
876#define XTAL_BT_GATE BIT(20)
877#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
878#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
879
880#define CKDLY_AFE BIT(26)
881#define CKDLY_USB BIT(27)
882#define CKDLY_DIG BIT(28)
883#define CKDLY_BT BIT(29)
884
885#define APLL_EN BIT(0)
886#define APLL_320_EN BIT(1)
887#define APLL_FREF_SEL BIT(2)
888#define APLL_EDGE_SEL BIT(3)
889#define APLL_WDOGB BIT(4)
890#define APLL_LPFEN BIT(5)
891
892#define APLL_REF_CLK_13MHZ 0x1
893#define APLL_REF_CLK_19_2MHZ 0x2
894#define APLL_REF_CLK_20MHZ 0x3
895#define APLL_REF_CLK_25MHZ 0x4
896#define APLL_REF_CLK_26MHZ 0x5
897#define APLL_REF_CLK_38_4MHZ 0x6
898#define APLL_REF_CLK_40MHZ 0x7
899
900#define APLL_320EN BIT(14)
901#define APLL_80EN BIT(15)
902#define APLL_1MEN BIT(24)
903
904#define ALD_EN BIT(18)
905#define EF_PD BIT(19)
906#define EF_FLAG BIT(31)
907
908#define EF_TRPT BIT(7)
909#define LDOE25_EN BIT(31)
910
911#define RSM_EN BIT(0)
912#define TIMER_EN BIT(4)
913
914#define TRSW0EN BIT(2)
915#define TRSW1EN BIT(3)
916#define EROM_EN BIT(4)
917#define ENBT BIT(5)
918#define ENUART BIT(8)
919#define UART_910 BIT(9)
920#define ENPMAC BIT(10)
921#define SIC_SWRST BIT(11)
922#define ENSIC BIT(12)
923#define SIC_23 BIT(13)
924#define ENHDP BIT(14)
925#define SIC_LBK BIT(15)
926
927#define LED0PL BIT(4)
928#define LED1PL BIT(12)
929#define LED0DIS BIT(7)
930
931#define MCUFWDL_EN BIT(0)
932#define MCUFWDL_RDY BIT(1)
933#define FWDL_CHKSUM_RPT BIT(2)
934#define MACINI_RDY BIT(3)
935#define BBINI_RDY BIT(4)
936#define RFINI_RDY BIT(5)
937#define WINTINI_RDY BIT(6)
938#define CPRST BIT(23)
939
940#define XCLK_VLD BIT(0)
941#define ACLK_VLD BIT(1)
942#define UCLK_VLD BIT(2)
943#define PCLK_VLD BIT(3)
944#define PCIRSTB BIT(4)
945#define V15_VLD BIT(5)
946#define TRP_B15V_EN BIT(7)
947#define SIC_IDLE BIT(8)
948#define BD_MAC2 BIT(9)
949#define BD_MAC1 BIT(10)
950#define IC_MACPHY_MODE BIT(11)
951#define VENDOR_ID BIT(19)
952#define PAD_HWPD_IDN BIT(22)
953#define TRP_VAUX_EN BIT(23)
954#define TRP_BT_EN BIT(24)
955#define BD_PKG_SEL BIT(25)
956#define BD_HCI_SEL BIT(26)
957#define TYPE_ID BIT(27)
958
959#define CHIP_VER_RTL_MASK 0xF000
960#define CHIP_VER_RTL_SHIFT 12
961
962#define REG_LBMODE (REG_CR + 3)
963
964#define HCI_TXDMA_EN BIT(0)
965#define HCI_RXDMA_EN BIT(1)
966#define TXDMA_EN BIT(2)
967#define RXDMA_EN BIT(3)
968#define PROTOCOL_EN BIT(4)
969#define SCHEDULE_EN BIT(5)
970#define MACTXEN BIT(6)
971#define MACRXEN BIT(7)
972#define ENSWBCN BIT(8)
973#define ENSEC BIT(9)
974
975#define _NETTYPE(x) (((x) & 0x3) << 16)
976#define MASK_NETTYPE 0x30000
977#define NT_NO_LINK 0x0
978#define NT_LINK_AD_HOC 0x1
979#define NT_LINK_AP 0x2
980#define NT_AS_AP 0x3
981
982#define _LBMODE(x) (((x) & 0xF) << 24)
983#define MASK_LBMODE 0xF000000
984#define LOOPBACK_NORMAL 0x0
985#define LOOPBACK_IMMEDIATELY 0xB
986#define LOOPBACK_MAC_DELAY 0x3
987#define LOOPBACK_PHY 0x1
988#define LOOPBACK_DMA 0x7
989
990#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
991#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
992#define _PSRX_MASK 0xF
993#define _PSTX_MASK 0xF0
994#define _PSRX(x) (x)
995#define _PSTX(x) ((x) << 4)
996
997#define PBP_64 0x0
998#define PBP_128 0x1
999#define PBP_256 0x2
1000#define PBP_512 0x3
1001#define PBP_1024 0x4
1002
1003#define RXDMA_ARBBW_EN BIT(0)
1004#define RXSHFT_EN BIT(1)
1005#define RXDMA_AGG_EN BIT(2)
1006#define QS_VO_QUEUE BIT(8)
1007#define QS_VI_QUEUE BIT(9)
1008#define QS_BE_QUEUE BIT(10)
1009#define QS_BK_QUEUE BIT(11)
1010#define QS_MANAGER_QUEUE BIT(12)
1011#define QS_HIGH_QUEUE BIT(13)
1012
1013#define HQSEL_VOQ BIT(0)
1014#define HQSEL_VIQ BIT(1)
1015#define HQSEL_BEQ BIT(2)
1016#define HQSEL_BKQ BIT(3)
1017#define HQSEL_MGTQ BIT(4)
1018#define HQSEL_HIQ BIT(5)
1019
1020#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
1021#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
1022#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
1023#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
1024#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
1025#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
1026
1027#define QUEUE_LOW 1
1028#define QUEUE_NORMAL 2
1029#define QUEUE_HIGH 3
1030
1031#define _LLT_NO_ACTIVE 0x0
1032#define _LLT_WRITE_ACCESS 0x1
1033#define _LLT_READ_ACCESS 0x2
1034
1035#define _LLT_INIT_DATA(x) ((x) & 0xFF)
1036#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
1037#define _LLT_OP(x) (((x) & 0x3) << 30)
1038#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
1039
1040#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
1041#define BB_WRITE_EN BIT(30)
1042#define BB_READ_EN BIT(31)
1043
1044#define _HPQ(x) ((x) & 0xFF)
1045#define _LPQ(x) (((x) & 0xFF) << 8)
1046#define _PUBQ(x) (((x) & 0xFF) << 16)
1047#define _NPQ(x) ((x) & 0xFF)
1048
1049#define HPQ_PUBLIC_DIS BIT(24)
1050#define LPQ_PUBLIC_DIS BIT(25)
1051#define LD_RQPN BIT(31)
1052
1053#define BCN_VALID BIT(16)
1054#define BCN_HEAD(x) (((x) & 0xFF) << 8)
1055#define BCN_HEAD_MASK 0xFF00
1056
1057#define BLK_DESC_NUM_SHIFT 4
1058#define BLK_DESC_NUM_MASK 0xF
1059
1060#define DROP_DATA_EN BIT(9)
1061
1062#define EN_AMPDU_RTY_NEW BIT(7)
1063
1064#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
1065
1066#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
1067#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
1068
1069#define RATE_REG_BITMAP_ALL 0xFFFFF
1070
1071#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
1072
1073#define _RRSR_RSC(x) (((x) & 0x3) << 21)
1074#define RRSR_RSC_RESERVED 0x0
1075#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
1076#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
1077#define RRSR_RSC_DUPLICATE_MODE 0x3
1078
1079#define USE_SHORT_G1 BIT(20)
1080
1081#define _AGGLMT_MCS0(x) ((x) & 0xF)
1082#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
1083#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
1084#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
1085#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
1086#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
1087#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
1088#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
1089
1090#define RETRY_LIMIT_SHORT_SHIFT 8
1091#define RETRY_LIMIT_LONG_SHIFT 0
1092
1093#define _DARF_RC1(x) ((x) & 0x1F)
1094#define _DARF_RC2(x) (((x) & 0x1F) << 8)
1095#define _DARF_RC3(x) (((x) & 0x1F) << 16)
1096#define _DARF_RC4(x) (((x) & 0x1F) << 24)
1097#define _DARF_RC5(x) ((x) & 0x1F)
1098#define _DARF_RC6(x) (((x) & 0x1F) << 8)
1099#define _DARF_RC7(x) (((x) & 0x1F) << 16)
1100#define _DARF_RC8(x) (((x) & 0x1F) << 24)
1101
1102#define _RARF_RC1(x) ((x) & 0x1F)
1103#define _RARF_RC2(x) (((x) & 0x1F) << 8)
1104#define _RARF_RC3(x) (((x) & 0x1F) << 16)
1105#define _RARF_RC4(x) (((x) & 0x1F) << 24)
1106#define _RARF_RC5(x) ((x) & 0x1F)
1107#define _RARF_RC6(x) (((x) & 0x1F) << 8)
1108#define _RARF_RC7(x) (((x) & 0x1F) << 16)
1109#define _RARF_RC8(x) (((x) & 0x1F) << 24)
1110
1111#define AC_PARAM_TXOP_LIMIT_OFFSET 16
1112#define AC_PARAM_ECW_MAX_OFFSET 12
1113#define AC_PARAM_ECW_MIN_OFFSET 8
1114#define AC_PARAM_AIFS_OFFSET 0
1115
1116#define _AIFS(x) (x)
1117#define _ECW_MAX_MIN(x) ((x) << 8)
1118#define _TXOP_LIMIT(x) ((x) << 16)
1119
1120#define _BCNIFS(x) ((x) & 0xFF)
1121#define _BCNECW(x) ((((x) & 0xF)) << 8)
1122
1123#define _LRL(x) ((x) & 0x3F)
1124#define _SRL(x) (((x) & 0x3F) << 8)
1125
1126#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
1127#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
1128
1129#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
1130#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
1131
1132#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
1133
1134#define DIS_EDCA_CNT_DWN BIT(11)
1135
1136#define EN_MBSSID BIT(1)
1137#define EN_TXBCN_RPT BIT(2)
1138#define EN_BCN_FUNCTION BIT(3)
1139
1140#define TSFTR_RST BIT(0)
1141#define TSFTR1_RST BIT(1)
1142
1143#define STOP_BCNQ BIT(6)
1144
1145#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
1146#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
1147
1148#define ACMHW_HWEN BIT(0)
1149#define ACMHW_BEQEN BIT(1)
1150#define ACMHW_VIQEN BIT(2)
1151#define ACMHW_VOQEN BIT(3)
1152#define ACMHW_BEQSTATUS BIT(4)
1153#define ACMHW_VIQSTATUS BIT(5)
1154#define ACMHW_VOQSTATUS BIT(6)
1155
1156#define APSDOFF BIT(6)
1157#define APSDOFF_STATUS BIT(7)
1158
1159#define BW_20MHZ BIT(2)
1160
1161#define RATE_BITMAP_ALL 0xFFFFF
1162
1163#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
1164
1165#define TSFRST BIT(0)
1166#define DIS_GCLK BIT(1)
1167#define PAD_SEL BIT(2)
1168#define PWR_ST BIT(6)
1169#define PWRBIT_OW_EN BIT(7)
1170#define ACRC BIT(8)
1171#define CFENDFORM BIT(9)
1172#define ICV BIT(10)
1173
1174#define AAP BIT(0)
1175#define APM BIT(1)
1176#define AM BIT(2)
1177#define AB BIT(3)
1178#define ADD3 BIT(4)
1179#define APWRMGT BIT(5)
1180#define CBSSID BIT(6)
1181#define CBSSID_DATA BIT(6)
1182#define CBSSID_BCN BIT(7)
1183#define ACRC32 BIT(8)
1184#define AICV BIT(9)
1185#define ADF BIT(11)
1186#define ACF BIT(12)
1187#define AMF BIT(13)
1188#define HTC_LOC_CTRL BIT(14)
1189#define UC_DATA_EN BIT(16)
1190#define BM_DATA_EN BIT(17)
1191#define MFBEN BIT(22)
1192#define LSIGEN BIT(23)
1193#define ENMBID BIT(24)
1194#define APP_BASSN BIT(27)
1195#define APP_PHYSTS BIT(28)
1196#define APP_ICV BIT(29)
1197#define APP_MIC BIT(30)
1198#define APP_FCS BIT(31)
1199
1200#define _MIN_SPACE(x) ((x) & 0x7)
1201#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
1202
1203#define RXERR_TYPE_OFDM_PPDU 0
1204#define RXERR_TYPE_OFDM_FALSE_ALARM 1
1205#define RXERR_TYPE_OFDM_MPDU_OK 2
1206#define RXERR_TYPE_OFDM_MPDU_FAIL 3
1207#define RXERR_TYPE_CCK_PPDU 4
1208#define RXERR_TYPE_CCK_FALSE_ALARM 5
1209#define RXERR_TYPE_CCK_MPDU_OK 6
1210#define RXERR_TYPE_CCK_MPDU_FAIL 7
1211#define RXERR_TYPE_HT_PPDU 8
1212#define RXERR_TYPE_HT_FALSE_ALARM 9
1213#define RXERR_TYPE_HT_MPDU_TOTAL 10
1214#define RXERR_TYPE_HT_MPDU_OK 11
1215#define RXERR_TYPE_HT_MPDU_FAIL 12
1216#define RXERR_TYPE_RX_FULL_DROP 15
1217
1218#define RXERR_COUNTER_MASK 0xFFFFF
1219#define RXERR_RPT_RST BIT(27)
1220#define _RXERR_RPT_SEL(type) ((type) << 28)
1221
1222#define SCR_TXUSEDK BIT(0)
1223#define SCR_RXUSEDK BIT(1)
1224#define SCR_TXENCENABLE BIT(2)
1225#define SCR_RXDECENABLE BIT(3)
1226#define SCR_SKBYA2 BIT(4)
1227#define SCR_NOSKMC BIT(5)
1228#define SCR_TXBCUSEDK BIT(6)
1229#define SCR_RXBCUSEDK BIT(7)
1230
/*
 * System configuration / strap status bits.
 * NOTE(review): this run re-defines XCLK_VLD..TYPE_ID with values
 * identical to the block that already appears earlier in this header
 * (a benign redefinition under C11 6.10.3, since the replacement lists
 * are token-identical); the only new name here is BT_FUNC (BIT(16)).
 * Consider de-duplicating in a follow-up cleanup patch.
 */
1231#define XCLK_VLD BIT(0)
1232#define ACLK_VLD BIT(1)
1233#define UCLK_VLD BIT(2)
1234#define PCLK_VLD BIT(3)
1235#define PCIRSTB BIT(4)
1236#define V15_VLD BIT(5)
1237#define TRP_B15V_EN BIT(7)
1238#define SIC_IDLE BIT(8)
1239#define BD_MAC2 BIT(9)
1240#define BD_MAC1 BIT(10)
1241#define IC_MACPHY_MODE BIT(11)
1242#define BT_FUNC BIT(16)
1243#define VENDOR_ID BIT(19)
1244#define PAD_HWPD_IDN BIT(22)
1245#define TRP_VAUX_EN BIT(23)
1246#define TRP_BT_EN BIT(24)
1247#define BD_PKG_SEL BIT(25)
1248#define BD_HCI_SEL BIT(26)
1249#define TYPE_ID BIT(27)
1250
1251#define USB_IS_HIGH_SPEED 0
1252#define USB_IS_FULL_SPEED 1
1253#define USB_SPEED_MASK BIT(5)
1254
1255#define USB_NORMAL_SIE_EP_MASK 0xF
1256#define USB_NORMAL_SIE_EP_SHIFT 4
1257
1258#define USB_TEST_EP_MASK 0x30
1259#define USB_TEST_EP_SHIFT 4
1260
1261#define USB_AGG_EN BIT(3)
1262
1263#define MAC_ADDR_LEN 6
1264#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/
1265
1266#define POLLING_LLT_THRESHOLD 20
1267#define POLLING_READY_TIMEOUT_COUNT 3000
1268
1269#define MAX_MSS_DENSITY_2T 0x13
1270#define MAX_MSS_DENSITY_1T 0x0A
1271
1272#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
1273#define EPROM_CMD_CONFIG 0x3
1274#define EPROM_CMD_LOAD 1
1275
1276#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
1277
1278#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1279
1280#define RPMAC_RESET 0x100
1281#define RPMAC_TXSTART 0x104
1282#define RPMAC_TXLEGACYSIG 0x108
1283#define RPMAC_TXHTSIG1 0x10c
1284#define RPMAC_TXHTSIG2 0x110
1285#define RPMAC_PHYDEBUG 0x114
1286#define RPMAC_TXPACKETNUM 0x118
1287#define RPMAC_TXIDLE 0x11c
1288#define RPMAC_TXMACHEADER0 0x120
1289#define RPMAC_TXMACHEADER1 0x124
1290#define RPMAC_TXMACHEADER2 0x128
1291#define RPMAC_TXMACHEADER3 0x12c
1292#define RPMAC_TXMACHEADER4 0x130
1293#define RPMAC_TXMACHEADER5 0x134
1294#define RPMAC_TXDADATYPE 0x138
1295#define RPMAC_TXRANDOMSEED 0x13c
1296#define RPMAC_CCKPLCPPREAMBLE 0x140
1297#define RPMAC_CCKPLCPHEADER 0x144
1298#define RPMAC_CCKCRC16 0x148
1299#define RPMAC_OFDMRXCRC32OK 0x170
1300#define RPMAC_OFDMRXCRC32ER 0x174
1301#define RPMAC_OFDMRXPARITYER 0x178
1302#define RPMAC_OFDMRXCRC8ER 0x17c
1303#define RPMAC_CCKCRXRC16ER 0x180
1304#define RPMAC_CCKCRXRC32ER 0x184
1305#define RPMAC_CCKCRXRC32OK 0x188
1306#define RPMAC_TXSTATUS 0x18c
1307
1308#define RFPGA0_RFMOD 0x800
1309
1310#define RFPGA0_TXINFO 0x804
1311#define RFPGA0_PSDFUNCTION 0x808
1312
1313#define RFPGA0_TXGAINSTAGE 0x80c
1314
1315#define RFPGA0_RFTIMING1 0x810
1316#define RFPGA0_RFTIMING2 0x814
1317
1318#define RFPGA0_XA_HSSIPARAMETER1 0x820
1319#define RFPGA0_XA_HSSIPARAMETER2 0x824
1320#define RFPGA0_XB_HSSIPARAMETER1 0x828
1321#define RFPGA0_XB_HSSIPARAMETER2 0x82c
1322
1323#define RFPGA0_XA_LSSIPARAMETER 0x840
1324#define RFPGA0_XB_LSSIPARAMETER 0x844
1325
1326#define RFPGA0_RFWAKEUPPARAMETER 0x850
1327#define RFPGA0_RFSLEEPUPPARAMETER 0x854
1328
1329#define RFPGA0_XAB_SWITCHCONTROL 0x858
1330#define RFPGA0_XCD_SWITCHCONTROL 0x85c
1331
1332#define RFPGA0_XA_RFINTERFACEOE 0x860
1333#define RFPGA0_XB_RFINTERFACEOE 0x864
1334
1335#define RFPGA0_XAB_RFINTERFACESW 0x870
1336#define RFPGA0_XCD_RFINTERFACESW 0x874
1337
1338#define RFPGA0_XAB_RFPARAMETER 0x878
1339#define RFPGA0_XCD_RFPARAMETER 0x87c
1340
1341#define RFPGA0_ANALOGPARAMETER1 0x880
1342#define RFPGA0_ANALOGPARAMETER2 0x884
1343#define RFPGA0_ANALOGPARAMETER3 0x888
1344#define RFPGA0_ANALOGPARAMETER4 0x88c
1345
1346#define RFPGA0_XA_LSSIREADBACK 0x8a0
1347#define RFPGA0_XB_LSSIREADBACK 0x8a4
1348#define RFPGA0_XC_LSSIREADBACK 0x8a8
1349#define RFPGA0_XD_LSSIREADBACK 0x8ac
1350
1351#define RFPGA0_PSDREPORT 0x8b4
1352#define TRANSCEIVEA_HSPI_READBACK 0x8b8
1353#define TRANSCEIVEB_HSPI_READBACK 0x8bc
1354#define REG_SC_CNT 0x8c4
1355#define RFPGA0_XAB_RFINTERFACERB 0x8e0
1356#define RFPGA0_XCD_RFINTERFACERB 0x8e4
1357
1358#define RFPGA1_RFMOD 0x900
1359
1360#define RFPGA1_TXBLOCK 0x904
1361#define RFPGA1_DEBUGSELECT 0x908
1362#define RFPGA1_TXINFO 0x90c
1363
1364#define RCCK0_SYSTEM 0xa00
1365
1366#define RCCK0_AFESETTING 0xa04
1367#define RCCK0_CCA 0xa08
1368
1369#define RCCK0_RXAGC1 0xa0c
1370#define RCCK0_RXAGC2 0xa10
1371
1372#define RCCK0_RXHP 0xa14
1373
1374#define RCCK0_DSPPARAMETER1 0xa18
1375#define RCCK0_DSPPARAMETER2 0xa1c
1376
1377#define RCCK0_TXFILTER1 0xa20
1378#define RCCK0_TXFILTER2 0xa24
1379#define RCCK0_DEBUGPORT 0xa28
1380#define RCCK0_FALSEALARMREPORT 0xa2c
1381#define RCCK0_TRSSIREPORT 0xa50
1382#define RCCK0_RXREPORT 0xa54
1383#define RCCK0_FACOUNTERLOWER 0xa5c
1384#define RCCK0_FACOUNTERUPPER 0xa58
1385#define RCCK0_CCA_CNT 0xa60
1386
1387
1388/* PageB(0xB00) */
1389#define RPDP_ANTA 0xb00
1390#define RPDP_ANTA_4 0xb04
1391#define RPDP_ANTA_8 0xb08
1392#define RPDP_ANTA_C 0xb0c
1393#define RPDP_ANTA_10 0xb10
1394#define RPDP_ANTA_14 0xb14
1395#define RPDP_ANTA_18 0xb18
1396#define RPDP_ANTA_1C 0xb1c
1397#define RPDP_ANTA_20 0xb20
1398#define RPDP_ANTA_24 0xb24
1399
1400#define RCONFIG_PMPD_ANTA 0xb28
1401#define CONFIG_RAM64X16 0xb2c
1402
1403#define RBNDA 0xb30
1404#define RHSSIPAR 0xb34
1405
1406#define RCONFIG_ANTA 0xb68
1407#define RCONFIG_ANTB 0xb6c
1408
1409#define RPDP_ANTB 0xb70
1410#define RPDP_ANTB_4 0xb74
1411#define RPDP_ANTB_8 0xb78
1412#define RPDP_ANTB_C 0xb7c
1413#define RPDP_ANTB_10 0xb80
1414#define RPDP_ANTB_14 0xb84
1415#define RPDP_ANTB_18 0xb88
1416#define RPDP_ANTB_1C 0xb8c
1417#define RPDP_ANTB_20 0xb90
1418#define RPDP_ANTB_24 0xb94
1419
1420#define RCONFIG_PMPD_ANTB 0xb98
1421
1422#define RBNDB 0xba0
1423
1424#define RAPK 0xbd8
1425#define RPM_RX0_ANTA 0xbdc
1426#define RPM_RX1_ANTA 0xbe0
1427#define RPM_RX2_ANTA 0xbe4
1428#define RPM_RX3_ANTA 0xbe8
1429#define RPM_RX0_ANTB 0xbec
1430#define RPM_RX1_ANTB 0xbf0
1431#define RPM_RX2_ANTB 0xbf4
1432#define RPM_RX3_ANTB 0xbf8
1433
1434/*Page C*/
1435#define ROFDM0_LSTF 0xc00
1436
1437#define ROFDM0_TRXPATHENABLE 0xc04
1438#define ROFDM0_TRMUXPAR 0xc08
1439#define ROFDM0_TRSWISOLATION 0xc0c
1440
1441#define ROFDM0_XARXAFE 0xc10
1442#define ROFDM0_XARXIQIMBALANCE 0xc14
1443#define ROFDM0_XBRXAFE 0xc18
1444#define ROFDM0_XBRXIQIMBALANCE 0xc1c
1445#define ROFDM0_XCRXAFE 0xc20
1446#define ROFDM0_XCRXIQIMBANLANCE 0xc24
1447#define ROFDM0_XDRXAFE 0xc28
1448#define ROFDM0_XDRXIQIMBALANCE 0xc2c
1449
1450#define ROFDM0_RXDETECTOR1 0xc30
1451#define ROFDM0_RXDETECTOR2 0xc34
1452#define ROFDM0_RXDETECTOR3 0xc38
1453#define ROFDM0_RXDETECTOR4 0xc3c
1454
1455#define ROFDM0_RXDSP 0xc40
1456#define ROFDM0_CFOANDDAGC 0xc44
1457#define ROFDM0_CCADROPTHRESHOLD 0xc48
1458#define ROFDM0_ECCATHRESHOLD 0xc4c
1459
1460#define ROFDM0_XAAGCCORE1 0xc50
1461#define ROFDM0_XAAGCCORE2 0xc54
1462#define ROFDM0_XBAGCCORE1 0xc58
1463#define ROFDM0_XBAGCCORE2 0xc5c
1464#define ROFDM0_XCAGCCORE1 0xc60
1465#define ROFDM0_XCAGCCORE2 0xc64
1466#define ROFDM0_XDAGCCORE1 0xc68
1467#define ROFDM0_XDAGCCORE2 0xc6c
1468
1469#define ROFDM0_AGCPARAMETER1 0xc70
1470#define ROFDM0_AGCPARAMETER2 0xc74
1471#define ROFDM0_AGCRSSITABLE 0xc78
1472#define ROFDM0_HTSTFAGC 0xc7c
1473
1474#define ROFDM0_XATXIQIMBALANCE 0xc80
1475#define ROFDM0_XATXAFE 0xc84
1476#define ROFDM0_XBTXIQIMBALANCE 0xc88
1477#define ROFDM0_XBTXAFE 0xc8c
1478#define ROFDM0_XCTXIQIMBALANCE 0xc90
1479#define ROFDM0_XCTXAFE 0xc94
1480#define ROFDM0_XDTXIQIMBALANCE 0xc98
1481#define ROFDM0_XDTXAFE 0xc9c
1482
1483#define ROFDM0_RXIQEXTANTA 0xca0
1484#define ROFDM0_TXCOEFF1 0xca4
1485#define ROFDM0_TXCOEFF2 0xca8
1486#define ROFDM0_TXCOEFF3 0xcac
1487#define ROFDM0_TXCOEFF4 0xcb0
1488#define ROFDM0_TXCOEFF5 0xcb4
1489#define ROFDM0_TXCOEFF6 0xcb8
1490
1491#define ROFDM0_RXHPPARAMETER 0xce0
1492#define ROFDM0_TXPSEUDONOISEWGT 0xce4
1493#define ROFDM0_FRAMESYNC 0xcf0
1494#define ROFDM0_DFSREPORT 0xcf4
1495
1496
1497#define ROFDM1_LSTF 0xd00
1498#define ROFDM1_TRXPATHENABLE 0xd04
1499
1500#define ROFDM1_CF0 0xd08
1501#define ROFDM1_CSI1 0xd10
1502#define ROFDM1_SBD 0xd14
1503#define ROFDM1_CSI2 0xd18
1504#define ROFDM1_CFOTRACKING 0xd2c
1505#define ROFDM1_TRXMESAURE1 0xd34
1506#define ROFDM1_INTFDET 0xd3c
1507#define ROFDM1_PSEUDONOISESTATEAB 0xd50
1508#define ROFDM1_PSEUDONOISESTATECD 0xd54
1509#define ROFDM1_RXPSEUDONOISEWGT 0xd58
1510
1511#define ROFDM_PHYCOUNTER1 0xda0
1512#define ROFDM_PHYCOUNTER2 0xda4
1513#define ROFDM_PHYCOUNTER3 0xda8
1514
1515#define ROFDM_SHORTCFOAB 0xdac
1516#define ROFDM_SHORTCFOCD 0xdb0
1517#define ROFDM_LONGCFOAB 0xdb4
1518#define ROFDM_LONGCFOCD 0xdb8
1519#define ROFDM_TAILCF0AB 0xdbc
1520#define ROFDM_TAILCF0CD 0xdc0
1521#define ROFDM_PWMEASURE1 0xdc4
1522#define ROFDM_PWMEASURE2 0xdc8
1523#define ROFDM_BWREPORT 0xdcc
1524#define ROFDM_AGCREPORT 0xdd0
1525#define ROFDM_RXSNR 0xdd4
1526#define ROFDM_RXEVMCSI 0xdd8
1527#define ROFDM_SIGREPORT 0xddc
1528
1529#define RTXAGC_A_RATE18_06 0xe00
1530#define RTXAGC_A_RATE54_24 0xe04
1531#define RTXAGC_A_CCK1_MCS32 0xe08
1532#define RTXAGC_A_MCS03_MCS00 0xe10
1533#define RTXAGC_A_MCS07_MCS04 0xe14
1534#define RTXAGC_A_MCS11_MCS08 0xe18
1535#define RTXAGC_A_MCS15_MCS12 0xe1c
1536
1537#define RTXAGC_B_RATE18_06 0x830
1538#define RTXAGC_B_RATE54_24 0x834
1539#define RTXAGC_B_CCK1_55_MCS32 0x838
1540#define RTXAGC_B_MCS03_MCS00 0x83c
1541#define RTXAGC_B_MCS07_MCS04 0x848
1542#define RTXAGC_B_MCS11_MCS08 0x84c
1543#define RTXAGC_B_MCS15_MCS12 0x868
1544#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
1545
1546#define RFPGA0_IQK 0xe28
1547#define RTX_IQK_TONE_A 0xe30
1548#define RRX_IQK_TONE_A 0xe34
1549#define RTX_IQK_PI_A 0xe38
1550#define RRX_IQK_PI_A 0xe3c
1551
1552#define RTX_IQK 0xe40
1553#define RRX_IQK 0xe44
1554#define RIQK_AGC_PTS 0xe48
1555#define RIQK_AGC_RSP 0xe4c
1556#define RTX_IQK_TONE_B 0xe50
1557#define RRX_IQK_TONE_B 0xe54
1558#define RTX_IQK_PI_B 0xe58
1559#define RRX_IQK_PI_B 0xe5c
1560#define RIQK_AGC_CONT 0xe60
1561
1562#define RBLUE_TOOTH 0xe6c
1563#define RRX_WAIT_CCA 0xe70
1564#define RTX_CCK_RFON 0xe74
1565#define RTX_CCK_BBON 0xe78
1566#define RTX_OFDM_RFON 0xe7c
1567#define RTX_OFDM_BBON 0xe80
1568#define RTX_TO_RX 0xe84
1569#define RTX_TO_TX 0xe88
1570#define RRX_CCK 0xe8c
1571
1572#define RTX_POWER_BEFORE_IQK_A 0xe94
1573#define RTX_POWER_AFTER_IQK_A 0xe9c
1574
1575#define RRX_POWER_BEFORE_IQK_A 0xea0
1576#define RRX_POWER_BEFORE_IQK_A_2 0xea4
1577#define RRX_POWER_AFTER_IQK_A 0xea8
1578#define RRX_POWER_AFTER_IQK_A_2 0xeac
1579
1580#define RTX_POWER_BEFORE_IQK_B 0xeb4
1581#define RTX_POWER_AFTER_IQK_B 0xebc
1582
1583#define RRX_POWER_BEFORE_IQK_B 0xec0
1584#define RRX_POWER_BEFORE_IQK_B_2 0xec4
1585#define RRX_POWER_AFTER_IQK_B 0xec8
1586#define RRX_POWER_AFTER_IQK_B_2 0xecc
1587
1588#define RRX_OFDM 0xed0
1589#define RRX_WAIT_RIFS 0xed4
1590#define RRX_TO_RX 0xed8
1591#define RSTANDBY 0xedc
1592#define RSLEEP 0xee0
1593#define RPMPD_ANAEN 0xeec
1594
1595#define RZEBRA1_HSSIENABLE 0x0
1596#define RZEBRA1_TRXENABLE1 0x1
1597#define RZEBRA1_TRXENABLE2 0x2
1598#define RZEBRA1_AGC 0x4
1599#define RZEBRA1_CHARGEPUMP 0x5
1600#define RZEBRA1_CHANNEL 0x7
1601
1602#define RZEBRA1_TXGAIN 0x8
1603#define RZEBRA1_TXLPF 0x9
1604#define RZEBRA1_RXLPF 0xb
1605#define RZEBRA1_RXHPFCORNER 0xc
1606
1607#define RGLOBALCTRL 0
1608#define RRTL8256_TXLPF 19
1609#define RRTL8256_RXLPF 11
1610#define RRTL8258_TXLPF 0x11
1611#define RRTL8258_RXLPF 0x13
1612#define RRTL8258_RSSILPF 0xa
1613
1614#define RF_AC 0x00
1615
1616#define RF_IQADJ_G1 0x01
1617#define RF_IQADJ_G2 0x02
1618#define RF_POW_TRSW 0x05
1619
1620#define RF_GAIN_RX 0x06
1621#define RF_GAIN_TX 0x07
1622
1623#define RF_TXM_IDAC 0x08
1624#define RF_BS_IQGEN 0x0F
1625
1626#define RF_MODE1 0x10
1627#define RF_MODE2 0x11
1628
1629#define RF_RX_AGC_HP 0x12
1630#define RF_TX_AGC 0x13
1631#define RF_BIAS 0x14
1632#define RF_IPA 0x15
1633#define RF_POW_ABILITY 0x17
1634#define RF_MODE_AG 0x18
1635#define RRFCHANNEL 0x18
1636#define RF_CHNLBW 0x18
1637#define RF_TOP 0x19
1638
1639#define RF_RX_G1 0x1A
1640#define RF_RX_G2 0x1B
1641
1642#define RF_RX_BB2 0x1C
1643#define RF_RX_BB1 0x1D
1644
1645#define RF_RCK1 0x1E
1646#define RF_RCK2 0x1F
1647
1648#define RF_TX_G1 0x20
1649#define RF_TX_G2 0x21
1650#define RF_TX_G3 0x22
1651
1652#define RF_TX_BB1 0x23
1653#define RF_T_METER 0x42
1654
1655#define RF_SYN_G1 0x25
1656#define RF_SYN_G2 0x26
1657#define RF_SYN_G3 0x27
1658#define RF_SYN_G4 0x28
1659#define RF_SYN_G5 0x29
1660#define RF_SYN_G6 0x2A
1661#define RF_SYN_G7 0x2B
1662#define RF_SYN_G8 0x2C
1663
1664#define RF_RCK_OS 0x30
1665#define RF_TXPA_G1 0x31
1666#define RF_TXPA_G2 0x32
1667#define RF_TXPA_G3 0x33
1668
1669#define RF_TX_BIAS_A 0x35
1670#define RF_TX_BIAS_D 0x36
1671#define RF_LOBF_9 0x38
1672#define RF_RXRF_A3 0x3C
1673#define RF_TRSW 0x3F
1674
1675#define RF_TXRF_A2 0x41
1676#define RF_TXPA_G4 0x46
1677#define RF_TXPA_A4 0x4B
1678
1679#define RF_WE_LUT 0xEF
1680
1681#define BBBRESETB 0x100
1682#define BGLOBALRESETB 0x200
1683#define BOFDMTXSTART 0x4
1684#define BCCKTXSTART 0x8
1685#define BCRC32DEBUG 0x100
1686#define BPMACLOOPBACK 0x10
1687#define BTXLSIG 0xffffff
1688#define BOFDMTXRATE 0xf
1689#define BOFDMTXRESERVED 0x10
1690#define BOFDMTXLENGTH 0x1ffe0
1691#define BOFDMTXPARITY 0x20000
1692#define BTXHTSIG1 0xffffff
1693#define BTXHTMCSRATE 0x7f
1694#define BTXHTBW 0x80
1695#define BTXHTLENGTH 0xffff00
1696#define BTXHTSIG2 0xffffff
1697#define BTXHTSMOOTHING 0x1
1698#define BTXHTSOUNDING 0x2
1699#define BTXHTRESERVED 0x4
1700#define BTXHTAGGREATION 0x8
1701#define BTXHTSTBC 0x30
1702#define BTXHTADVANCECODING 0x40
1703#define BTXHTSHORTGI 0x80
1704#define BTXHTNUMBERHT_LTF 0x300
1705#define BTXHTCRC8 0x3fc00
1706#define BCOUNTERRESET 0x10000
1707#define BNUMOFOFDMTX 0xffff
1708#define BNUMOFCCKTX 0xffff0000
1709#define BTXIDLEINTERVAL 0xffff
1710#define BOFDMSERVICE 0xffff0000
1711#define BTXMACHEADER 0xffffffff
1712#define BTXDATAINIT 0xff
1713#define BTXHTMODE 0x100
1714#define BTXDATATYPE 0x30000
1715#define BTXRANDOMSEED 0xffffffff
1716#define BCCKTXPREAMBLE 0x1
1717#define BCCKTXSFD 0xffff0000
1718#define BCCKTXSIG 0xff
1719#define BCCKTXSERVICE 0xff00
1720#define BCCKLENGTHEXT 0x8000
1721#define BCCKTXLENGHT 0xffff0000
1722#define BCCKTXCRC16 0xffff
1723#define BCCKTXSTATUS 0x1
1724#define BOFDMTXSTATUS 0x2
/*
 * IS_BB_REG_OFFSET_92S - true when @_offset lies in the baseband
 * register window 0x800..0xfff (inclusive).
 *
 * Fix: the macro parameter is now parenthesized inside the expansion,
 * so argument expressions containing low-precedence operators (e.g. a
 * ternary) evaluate as the caller wrote them (CERT PRE01-C). The range
 * checked is unchanged.
 */
#define IS_BB_REG_OFFSET_92S(_offset) \
	(((_offset) >= 0x800) && ((_offset) <= 0xfff))
1727
1728#define BRFMOD 0x1
1729#define BJAPANMODE 0x2
1730#define BCCKTXSC 0x30
1731#define BCCKEN 0x1000000
1732#define BOFDMEN 0x2000000
1733
1734#define BOFDMRXADCPHASE 0x10000
1735#define BOFDMTXDACPHASE 0x40000
1736#define BXATXAGC 0x3f
1737
1738#define BXBTXAGC 0xf00
1739#define BXCTXAGC 0xf000
1740#define BXDTXAGC 0xf0000
1741
1742#define BPASTART 0xf0000000
1743#define BTRSTART 0x00f00000
1744#define BRFSTART 0x0000f000
1745#define BBBSTART 0x000000f0
1746#define BBBCCKSTART 0x0000000f
1747#define BPAEND 0xf
1748#define BTREND 0x0f000000
1749#define BRFEND 0x000f0000
1750#define BCCAMASK 0x000000f0
1751#define BR2RCCAMASK 0x00000f00
1752#define BHSSI_R2TDELAY 0xf8000000
1753#define BHSSI_T2RDELAY 0xf80000
1754#define BCONTXHSSI 0x400
1755#define BIGFROMCCK 0x200
1756#define BAGCADDRESS 0x3f
1757#define BRXHPTX 0x7000
1758#define BRXHP2RX 0x38000
1759#define BRXHPCCKINI 0xc0000
1760#define BAGCTXCODE 0xc00000
1761#define BAGCRXCODE 0x300000
1762
1763#define B3WIREDATALENGTH 0x800
1764#define B3WIREADDREAALENGTH 0x400
1765
1766#define B3WIRERFPOWERDOWN 0x1
1767#define B5GPAPEPOLARITY 0x40000000
1768#define B2GPAPEPOLARITY 0x80000000
1769#define BRFSW_TXDEFAULTANT 0x3
1770#define BRFSW_TXOPTIONANT 0x30
1771#define BRFSW_RXDEFAULTANT 0x300
1772#define BRFSW_RXOPTIONANT 0x3000
1773#define BRFSI_3WIREDATA 0x1
1774#define BRFSI_3WIRECLOCK 0x2
1775#define BRFSI_3WIRELOAD 0x4
1776#define BRFSI_3WIRERW 0x8
1777#define BRFSI_3WIRE 0xf
1778
1779#define BRFSI_RFENV 0x10
1780
1781#define BRFSI_TRSW 0x20
1782#define BRFSI_TRSWB 0x40
1783#define BRFSI_ANTSW 0x100
1784#define BRFSI_ANTSWB 0x200
1785#define BRFSI_PAPE 0x400
1786#define BRFSI_PAPE5G 0x800
1787#define BBANDSELECT 0x1
1788#define BHTSIG2_GI 0x80
1789#define BHTSIG2_SMOOTHING 0x01
1790#define BHTSIG2_SOUNDING 0x02
1791#define BHTSIG2_AGGREATON 0x08
1792#define BHTSIG2_STBC 0x30
1793#define BHTSIG2_ADVCODING 0x40
1794#define BHTSIG2_NUMOFHTLTF 0x300
1795#define BHTSIG2_CRC8 0x3fc
1796#define BHTSIG1_MCS 0x7f
1797#define BHTSIG1_BANDWIDTH 0x80
1798#define BHTSIG1_HTLENGTH 0xffff
1799#define BLSIG_RATE 0xf
1800#define BLSIG_RESERVED 0x10
1801#define BLSIG_LENGTH 0x1fffe
1802#define BLSIG_PARITY 0x20
1803#define BCCKRXPHASE 0x4
1804
1805#define BLSSIREADADDRESS 0x7f800000
1806#define BLSSIREADEDGE 0x80000000
1807
1808#define BLSSIREADBACKDATA 0xfffff
1809
1810#define BLSSIREADOKFLAG 0x1000
1811#define BCCKSAMPLERATE 0x8
1812#define BREGULATOR0STANDBY 0x1
1813#define BREGULATORPLLSTANDBY 0x2
1814#define BREGULATOR1STANDBY 0x4
1815#define BPLLPOWERUP 0x8
1816#define BDPLLPOWERUP 0x10
1817#define BDA10POWERUP 0x20
1818#define BAD7POWERUP 0x200
1819#define BDA6POWERUP 0x2000
1820#define BXTALPOWERUP 0x4000
1821#define B40MDCLKPOWERUP 0x8000
1822#define BDA6DEBUGMODE 0x20000
1823#define BDA6SWING 0x380000
1824
1825#define BADCLKPHASE 0x4000000
1826#define B80MCLKDELAY 0x18000000
1827#define BAFEWATCHDOGENABLE 0x20000000
1828
1829#define BXTALCAP01 0xc0000000
1830#define BXTALCAP23 0x3
1831#define BXTALCAP92X 0x0f000000
1832#define BXTALCAP 0x0f000000
1833
1834#define BINTDIFCLKENABLE 0x400
1835#define BEXTSIGCLKENABLE 0x800
1836#define BBANDGAP_MBIAS_POWERUP 0x10000
1837#define BAD11SH_GAIN 0xc0000
1838#define BAD11NPUT_RANGE 0x700000
1839#define BAD110P_CURRENT 0x3800000
1840#define BLPATH_LOOPBACK 0x4000000
1841#define BQPATH_LOOPBACK 0x8000000
1842#define BAFE_LOOPBACK 0x10000000
1843#define BDA10_SWING 0x7e0
1844#define BDA10_REVERSE 0x800
1845#define BDA_CLK_SOURCE 0x1000
1846#define BDA7INPUT_RANGE 0x6000
1847#define BDA7_GAIN 0x38000
1848#define BDA7OUTPUT_CM_MODE 0x40000
1849#define BDA7INPUT_CM_MODE 0x380000
1850#define BDA7CURRENT 0xc00000
1851#define BREGULATOR_ADJUST 0x7000000
1852#define BAD11POWERUP_ATTX 0x1
1853#define BDA10PS_ATTX 0x10
1854#define BAD11POWERUP_ATRX 0x100
1855#define BDA10PS_ATRX 0x1000
1856#define BCCKRX_AGC_FORMAT 0x200
1857#define BPSDFFT_SAMPLE_POINT 0xc000
1858#define BPSD_AVERAGE_NUM 0x3000
1859#define BIQPATH_CONTROL 0xc00
1860#define BPSD_FREQ 0x3ff
1861#define BPSD_ANTENNA_PATH 0x30
1862#define BPSD_IQ_SWITCH 0x40
1863#define BPSD_RX_TRIGGER 0x400000
1864#define BPSD_TX_TRIGGER 0x80000000
1865#define BPSD_SINE_TONE_SCALE 0x7f000000
1866#define BPSD_REPORT 0xffff
1867
1868#define BOFDM_TXSC 0x30000000
1869#define BCCK_TXON 0x1
1870#define BOFDM_TXON 0x2
1871#define BDEBUG_PAGE 0xfff
1872#define BDEBUG_ITEM 0xff
1873#define BANTL 0x10
1874#define BANT_NONHT 0x100
1875#define BANT_HT1 0x1000
1876#define BANT_HT2 0x10000
1877#define BANT_HT1S1 0x100000
1878#define BANT_NONHTS1 0x1000000
1879
1880#define BCCK_BBMODE 0x3
1881#define BCCK_TXPOWERSAVING 0x80
1882#define BCCK_RXPOWERSAVING 0x40
1883
1884#define BCCK_SIDEBAND 0x10
1885
1886#define BCCK_SCRAMBLE 0x8
1887#define BCCK_ANTDIVERSITY 0x8000
1888#define BCCK_CARRIER_RECOVERY 0x4000
1889#define BCCK_TXRATE 0x3000
1890#define BCCK_DCCANCEL 0x0800
1891#define BCCK_ISICANCEL 0x0400
1892#define BCCK_MATCH_FILTER 0x0200
1893#define BCCK_EQUALIZER 0x0100
1894#define BCCK_PREAMBLE_DETECT 0x800000
1895#define BCCK_FAST_FALSECCA 0x400000
1896#define BCCK_CH_ESTSTART 0x300000
1897#define BCCK_CCA_COUNT 0x080000
1898#define BCCK_CS_LIM 0x070000
1899#define BCCK_BIST_MODE 0x80000000
1900#define BCCK_CCAMASK 0x40000000
1901#define BCCK_TX_DAC_PHASE 0x4
1902#define BCCK_RX_ADC_PHASE 0x20000000
1903#define BCCKR_CP_MODE 0x0100
1904#define BCCK_TXDC_OFFSET 0xf0
1905#define BCCK_RXDC_OFFSET 0xf
1906#define BCCK_CCA_MODE 0xc000
1907#define BCCK_FALSECS_LIM 0x3f00
1908#define BCCK_CS_RATIO 0xc00000
1909#define BCCK_CORGBIT_SEL 0x300000
1910#define BCCK_PD_LIM 0x0f0000
1911#define BCCK_NEWCCA 0x80000000
1912#define BCCK_RXHP_OF_IG 0x8000
1913#define BCCK_RXIG 0x7f00
1914#define BCCK_LNA_POLARITY 0x800000
1915#define BCCK_RX1ST_BAIN 0x7f0000
1916#define BCCK_RF_EXTEND 0x20000000
1917#define BCCK_RXAGC_SATLEVEL 0x1f000000
1918#define BCCK_RXAGC_SATCOUNT 0xe0
1919#define BCCKRXRFSETTLE 0x1f
1920#define BCCK_FIXED_RXAGC 0x8000
1921#define BCCK_ANTENNA_POLARITY 0x2000
1922#define BCCK_TXFILTER_TYPE 0x0c00
1923#define BCCK_RXAGC_REPORTTYPE 0x0300
1924#define BCCK_RXDAGC_EN 0x80000000
1925#define BCCK_RXDAGC_PERIOD 0x20000000
1926#define BCCK_RXDAGC_SATLEVEL 0x1f000000
1927#define BCCK_TIMING_RECOVERY 0x800000
1928#define BCCK_TXC0 0x3f0000
1929#define BCCK_TXC1 0x3f000000
1930#define BCCK_TXC2 0x3f
1931#define BCCK_TXC3 0x3f00
1932#define BCCK_TXC4 0x3f0000
1933#define BCCK_TXC5 0x3f000000
1934#define BCCK_TXC6 0x3f
1935#define BCCK_TXC7 0x3f00
1936#define BCCK_DEBUGPORT 0xff0000
1937#define BCCK_DAC_DEBUG 0x0f000000
1938#define BCCK_FALSEALARM_ENABLE 0x8000
1939#define BCCK_FALSEALARM_READ 0x4000
1940#define BCCK_TRSSI 0x7f
1941#define BCCK_RXAGC_REPORT 0xfe
1942#define BCCK_RXREPORT_ANTSEL 0x80000000
1943#define BCCK_RXREPORT_MFOFF 0x40000000
1944#define BCCK_RXREPORT_SQLOSS 0x20000000
1945#define BCCK_RXREPORT_PKTLOSS 0x10000000
1946#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
1947#define BCCK_RXREPORT_RATEERROR 0x04000000
1948#define BCCK_RXREPORT_RXRATE 0x03000000
1949#define BCCK_RXFA_COUNTER_LOWER 0xff
1950#define BCCK_RXFA_COUNTER_UPPER 0xff000000
1951#define BCCK_RXHPAGC_START 0xe000
1952#define BCCK_RXHPAGC_FINAL 0x1c00
1953#define BCCK_RXFALSEALARM_ENABLE 0x8000
1954#define BCCK_FACOUNTER_FREEZE 0x4000
1955#define BCCK_TXPATH_SEL 0x10000000
1956#define BCCK_DEFAULT_RXPATH 0xc000000
1957#define BCCK_OPTION_RXPATH 0x3000000
1958
1959#define BNUM_OFSTF 0x3
1960#define BSHIFT_L 0xc0
1961#define BGI_TH 0xc
1962#define BRXPATH_A 0x1
1963#define BRXPATH_B 0x2
1964#define BRXPATH_C 0x4
1965#define BRXPATH_D 0x8
1966#define BTXPATH_A 0x1
1967#define BTXPATH_B 0x2
1968#define BTXPATH_C 0x4
1969#define BTXPATH_D 0x8
1970#define BTRSSI_FREQ 0x200
1971#define BADC_BACKOFF 0x3000
1972#define BDFIR_BACKOFF 0xc000
1973#define BTRSSI_LATCH_PHASE 0x10000
1974#define BRX_LDC_OFFSET 0xff
1975#define BRX_QDC_OFFSET 0xff00
1976#define BRX_DFIR_MODE 0x1800000
1977#define BRX_DCNF_TYPE 0xe000000
1978#define BRXIQIMB_A 0x3ff
1979#define BRXIQIMB_B 0xfc00
1980#define BRXIQIMB_C 0x3f0000
1981#define BRXIQIMB_D 0xffc00000
1982#define BDC_DC_NOTCH 0x60000
1983#define BRXNB_NOTCH 0x1f000000
1984#define BPD_TH 0xf
1985#define BPD_TH_OPT2 0xc000
1986#define BPWED_TH 0x700
1987#define BIFMF_WIN_L 0x800
1988#define BPD_OPTION 0x1000
1989#define BMF_WIN_L 0xe000
1990#define BBW_SEARCH_L 0x30000
1991#define BWIN_ENH_L 0xc0000
1992#define BBW_TH 0x700000
1993#define BED_TH2 0x3800000
1994#define BBW_OPTION 0x4000000
1995#define BRADIO_TH 0x18000000
1996#define BWINDOW_L 0xe0000000
1997#define BSBD_OPTION 0x1
1998#define BFRAME_TH 0x1c
1999#define BFS_OPTION 0x60
2000#define BDC_SLOPE_CHECK 0x80
2001#define BFGUARD_COUNTER_DC_L 0xe00
2002#define BFRAME_WEIGHT_SHORT 0x7000
2003#define BSUB_TUNE 0xe00000
2004#define BFRAME_DC_LENGTH 0xe000000
2005#define BSBD_START_OFFSET 0x30000000
2006#define BFRAME_TH_2 0x7
2007#define BFRAME_GI2_TH 0x38
2008#define BGI2_SYNC_EN 0x40
2009#define BSARCH_SHORT_EARLY 0x300
2010#define BSARCH_SHORT_LATE 0xc00
2011#define BSARCH_GI2_LATE 0x70000
2012#define BCFOANTSUM 0x1
2013#define BCFOACC 0x2
2014#define BCFOSTARTOFFSET 0xc
2015#define BCFOLOOPBACK 0x70
2016#define BCFOSUMWEIGHT 0x80
2017#define BDAGCENABLE 0x10000
2018#define BTXIQIMB_A 0x3ff
2019#define BTXIQIMB_b 0xfc00
2020#define BTXIQIMB_C 0x3f0000
2021#define BTXIQIMB_D 0xffc00000
2022#define BTXIDCOFFSET 0xff
2023#define BTXIQDCOFFSET 0xff00
2024#define BTXDFIRMODE 0x10000
2025#define BTXPESUDO_NOISEON 0x4000000
2026#define BTXPESUDO_NOISE_A 0xff
2027#define BTXPESUDO_NOISE_B 0xff00
2028#define BTXPESUDO_NOISE_C 0xff0000
2029#define BTXPESUDO_NOISE_D 0xff000000
2030#define BCCA_DROPOPTION 0x20000
2031#define BCCA_DROPTHRES 0xfff00000
2032#define BEDCCA_H 0xf
2033#define BEDCCA_L 0xf0
2034#define BLAMBDA_ED 0x300
2035#define BRX_INITIALGAIN 0x7f
2036#define BRX_ANTDIV_EN 0x80
2037#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
2038#define BRX_HIGHPOWER_FLOW 0x8000
2039#define BRX_AGC_FREEZE_THRES 0xc0000
2040#define BRX_FREEZESTEP_AGC1 0x300000
2041#define BRX_FREEZESTEP_AGC2 0xc00000
2042#define BRX_FREEZESTEP_AGC3 0x3000000
2043#define BRX_FREEZESTEP_AGC0 0xc000000
2044#define BRXRSSI_CMP_EN 0x10000000
2045#define BRXQUICK_AGCEN 0x20000000
2046#define BRXAGC_FREEZE_THRES_MODE 0x40000000
2047#define BRX_OVERFLOW_CHECKTYPE 0x80000000
2048#define BRX_AGCSHIFT 0x7f
2049#define BTRSW_TRI_ONLY 0x80
2050#define BPOWER_THRES 0x300
2051#define BRXAGC_EN 0x1
2052#define BRXAGC_TOGETHER_EN 0x2
2053#define BRXAGC_MIN 0x4
2054#define BRXHP_INI 0x7
2055#define BRXHP_TRLNA 0x70
2056#define BRXHP_RSSI 0x700
2057#define BRXHP_BBP1 0x7000
2058#define BRXHP_BBP2 0x70000
2059#define BRXHP_BBP3 0x700000
2060#define BRSSI_H 0x7f0000
2061#define BRSSI_GEN 0x7f000000
2062#define BRXSETTLE_TRSW 0x7
2063#define BRXSETTLE_LNA 0x38
2064#define BRXSETTLE_RSSI 0x1c0
2065#define BRXSETTLE_BBP 0xe00
2066#define BRXSETTLE_RXHP 0x7000
2067#define BRXSETTLE_ANTSW_RSSI 0x38000
2068#define BRXSETTLE_ANTSW 0xc0000
2069#define BRXPROCESS_TIME_DAGC 0x300000
2070#define BRXSETTLE_HSSI 0x400000
2071#define BRXPROCESS_TIME_BBPPW 0x800000
2072#define BRXANTENNA_POWER_SHIFT 0x3000000
2073#define BRSSI_TABLE_SELECT 0xc000000
2074#define BRXHP_FINAL 0x7000000
2075#define BRXHPSETTLE_BBP 0x7
2076#define BRXHTSETTLE_HSSI 0x8
2077#define BRXHTSETTLE_RXHP 0x70
2078#define BRXHTSETTLE_BBPPW 0x80
2079#define BRXHTSETTLE_IDLE 0x300
2080#define BRXHTSETTLE_RESERVED 0x1c00
2081#define BRXHT_RXHP_EN 0x8000
2082#define BRXAGC_FREEZE_THRES 0x30000
2083#define BRXAGC_TOGETHEREN 0x40000
2084#define BRXHTAGC_MIN 0x80000
2085#define BRXHTAGC_EN 0x100000
2086#define BRXHTDAGC_EN 0x200000
2087#define BRXHT_RXHP_BBP 0x1c00000
2088#define BRXHT_RXHP_FINAL 0xe0000000
2089#define BRXPW_RADIO_TH 0x3
2090#define BRXPW_RADIO_EN 0x4
2091#define BRXMF_HOLD 0x3800
2092#define BRXPD_DELAY_TH1 0x38
2093#define BRXPD_DELAY_TH2 0x1c0
2094#define BRXPD_DC_COUNT_MAX 0x600
2095#define BRXPD_DELAY_TH 0x8000
2096#define BRXPROCESS_DELAY 0xf0000
2097#define BRXSEARCHRANGE_GI2_EARLY 0x700000
2098#define BRXFRAME_FUARD_COUNTER_L 0x3800000
2099#define BRXSGI_GUARD_L 0xc000000
2100#define BRXSGI_SEARCH_L 0x30000000
2101#define BRXSGI_TH 0xc0000000
2102#define BDFSCNT0 0xff
2103#define BDFSCNT1 0xff00
2104#define BDFSFLAG 0xf0000
2105#define BMF_WEIGHT_SUM 0x300000
2106#define BMINIDX_TH 0x7f000000
2107#define BDAFORMAT 0x40000
2108#define BTXCH_EMU_ENABLE 0x01000000
2109#define BTRSW_ISOLATION_A 0x7f
2110#define BTRSW_ISOLATION_B 0x7f00
2111#define BTRSW_ISOLATION_C 0x7f0000
2112#define BTRSW_ISOLATION_D 0x7f000000
2113#define BEXT_LNA_GAIN 0x7c00
2114
2115#define BSTBC_EN 0x4
2116#define BANTENNA_MAPPING 0x10
2117#define BNSS 0x20
2118#define BCFO_ANTSUM_ID 0x200
2119#define BPHY_COUNTER_RESET 0x8000000
2120#define BCFO_REPORT_GET 0x4000000
2121#define BOFDM_CONTINUE_TX 0x10000000
2122#define BOFDM_SINGLE_CARRIER 0x20000000
2123#define BOFDM_SINGLE_TONE 0x40000000
2124#define BHT_DETECT 0x100
2125#define BCFOEN 0x10000
2126#define BCFOVALUE 0xfff00000
2127#define BSIGTONE_RE 0x3f
2128#define BSIGTONE_IM 0x7f00
2129#define BCOUNTER_CCA 0xffff
2130#define BCOUNTER_PARITYFAIL 0xffff0000
2131#define BCOUNTER_RATEILLEGAL 0xffff
2132#define BCOUNTER_CRC8FAIL 0xffff0000
2133#define BCOUNTER_MCSNOSUPPORT 0xffff
2134#define BCOUNTER_FASTSYNC 0xffff
2135#define BSHORTCFO 0xfff
2136#define BSHORTCFOT_LENGTH 12
2137#define BSHORTCFOF_LENGTH 11
2138#define BLONGCFO 0x7ff
2139#define BLONGCFOT_LENGTH 11
2140#define BLONGCFOF_LENGTH 11
2141#define BTAILCFO 0x1fff
2142#define BTAILCFOT_LENGTH 13
2143#define BTAILCFOF_LENGTH 12
2144#define BNOISE_EN_PWDB 0xffff
2145#define BCC_POWER_DB 0xffff0000
2146#define BMOISE_PWDB 0xffff
2147#define BPOWERMEAST_LENGTH 10
2148#define BPOWERMEASF_LENGTH 3
2149#define BRX_HT_BW 0x1
2150#define BRXSC 0x6
2151#define BRX_HT 0x8
2152#define BNB_INTF_DET_ON 0x1
2153#define BINTF_WIN_LEN_CFG 0x30
2154#define BNB_INTF_TH_CFG 0x1c0
2155#define BRFGAIN 0x3f
2156#define BTABLESEL 0x40
2157#define BTRSW 0x80
2158#define BRXSNR_A 0xff
2159#define BRXSNR_B 0xff00
2160#define BRXSNR_C 0xff0000
2161#define BRXSNR_D 0xff000000
2162#define BSNR_EVMT_LENGTH 8
2163#define BSNR_EVMF_LENGTH 1
2164#define BCSI1ST 0xff
2165#define BCSI2ND 0xff00
2166#define BRXEVM1ST 0xff0000
2167#define BRXEVM2ND 0xff000000
2168#define BSIGEVM 0xff
2169#define BPWDB 0xff00
2170#define BSGIEN 0x10000
2171
2172#define BSFACTOR_QMA1 0xf
2173#define BSFACTOR_QMA2 0xf0
2174#define BSFACTOR_QMA3 0xf00
2175#define BSFACTOR_QMA4 0xf000
2176#define BSFACTOR_QMA5 0xf0000
2177#define BSFACTOR_QMA6 0xf0000
2178#define BSFACTOR_QMA7 0xf00000
2179#define BSFACTOR_QMA8 0xf000000
2180#define BSFACTOR_QMA9 0xf0000000
2181#define BCSI_SCHEME 0x100000
2182
2183#define BNOISE_LVL_TOP_SET 0x3
2184#define BCHSMOOTH 0x4
2185#define BCHSMOOTH_CFG1 0x38
2186#define BCHSMOOTH_CFG2 0x1c0
2187#define BCHSMOOTH_CFG3 0xe00
2188#define BCHSMOOTH_CFG4 0x7000
2189#define BMRCMODE 0x800000
2190#define BTHEVMCFG 0x7000000
2191
2192#define BLOOP_FIT_TYPE 0x1
2193#define BUPD_CFO 0x40
2194#define BUPD_CFO_OFFDATA 0x80
2195#define BADV_UPD_CFO 0x100
2196#define BADV_TIME_CTRL 0x800
2197#define BUPD_CLKO 0x1000
2198#define BFC 0x6000
2199#define BTRACKING_MODE 0x8000
2200#define BPHCMP_ENABLE 0x10000
2201#define BUPD_CLKO_LTF 0x20000
2202#define BCOM_CH_CFO 0x40000
2203#define BCSI_ESTI_MODE 0x80000
2204#define BADV_UPD_EQZ 0x100000
2205#define BUCHCFG 0x7000000
2206#define BUPDEQZ 0x8000000
2207
2208#define BRX_PESUDO_NOISE_ON 0x20000000
2209#define BRX_PESUDO_NOISE_A 0xff
2210#define BRX_PESUDO_NOISE_B 0xff00
2211#define BRX_PESUDO_NOISE_C 0xff0000
2212#define BRX_PESUDO_NOISE_D 0xff000000
2213#define BRX_PESUDO_NOISESTATE_A 0xffff
2214#define BRX_PESUDO_NOISESTATE_B 0xffff0000
2215#define BRX_PESUDO_NOISESTATE_C 0xffff
2216#define BRX_PESUDO_NOISESTATE_D 0xffff0000
2217
2218#define BZEBRA1_HSSIENABLE 0x8
2219#define BZEBRA1_TRXCONTROL 0xc00
2220#define BZEBRA1_TRXGAINSETTING 0x07f
2221#define BZEBRA1_RXCOUNTER 0xc00
2222#define BZEBRA1_TXCHANGEPUMP 0x38
2223#define BZEBRA1_RXCHANGEPUMP 0x7
2224#define BZEBRA1_CHANNEL_NUM 0xf80
2225#define BZEBRA1_TXLPFBW 0x400
2226#define BZEBRA1_RXLPFBW 0x600
2227
2228#define BRTL8256REG_MODE_CTRL1 0x100
2229#define BRTL8256REG_MODE_CTRL0 0x40
2230#define BRTL8256REG_TXLPFBW 0x18
2231#define BRTL8256REG_RXLPFBW 0x600
2232
2233#define BRTL8258_TXLPFBW 0xc
2234#define BRTL8258_RXLPFBW 0xc00
2235#define BRTL8258_RSSILPFBW 0xc0
2236
2237#define BBYTE0 0x1
2238#define BBYTE1 0x2
2239#define BBYTE2 0x4
2240#define BBYTE3 0x8
2241#define BWORD0 0x3
2242#define BWORD1 0xc
2243#define BWORD 0xf
2244
2245#define BENABLE 0x1
2246#define BDISABLE 0x0
2247
2248#define LEFT_ANTENNA 0x0
2249#define RIGHT_ANTENNA 0x1
2250
2251#define TCHECK_TXSTATUS 500
2252#define TUPDATE_RXCOUNTER 100
2253
2254#define REG_UN_used_register 0x01bf
2255
2256/* WOL bit information */
2257#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
2258#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
2259#define HAL92C_WOL_DISASSOC_EVENT BIT(2)
2260#define HAL92C_WOL_DEAUTH_EVENT BIT(3)
2261#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4)
2262
2263#define WOL_REASON_PTK_UPDATE BIT(0)
2264#define WOL_REASON_GTK_UPDATE BIT(1)
2265#define WOL_REASON_DISASSOC BIT(2)
2266#define WOL_REASON_DEAUTH BIT(3)
2267#define WOL_REASON_FW_DISCONNECT BIT(4)
2268
2269/* 2 EFUSE_TEST (For RTL8723 partially) */
2270#define EFUSE_SEL(x) (((x) & 0x3) << 8)
2271#define EFUSE_SEL_MASK 0x300
2272#define EFUSE_WIFI_SEL_0 0x0
2273
2274#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/
2275#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/
2276
2277#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
new file mode 100644
index 000000000000..486294930a7b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
@@ -0,0 +1,504 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "reg.h"
28#include "def.h"
29#include "phy.h"
30#include "rf.h"
31#include "dm.h"
32
33static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
34
35void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
36{
37 struct rtl_priv *rtlpriv = rtl_priv(hw);
38 struct rtl_phy *rtlphy = &(rtlpriv->phy);
39
40 switch (bandwidth) {
41 case HT_CHANNEL_WIDTH_20:
42 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
43 0xfffff3ff) | BIT(10) | BIT(11));
44 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
45 rtlphy->rfreg_chnlval[0]);
46 break;
47 case HT_CHANNEL_WIDTH_20_40:
48 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
49 0xfffff3ff) | BIT(10));
50 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
51 rtlphy->rfreg_chnlval[0]);
52 break;
53 default:
54 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
55 "unknown bandwidth: %#X\n", bandwidth);
56 break;
57 }
58}
59
60void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
61 u8 *ppowerlevel)
62{
63 struct rtl_priv *rtlpriv = rtl_priv(hw);
64 struct rtl_phy *rtlphy = &(rtlpriv->phy);
65 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
66 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
67 u32 tx_agc[2] = {0, 0}, tmpval;
68 bool turbo_scanoff = false;
69 u8 idx1, idx2;
70 u8 *ptr;
71 u8 direction;
72 u32 pwrtrac_value;
73
74 if (rtlefuse->eeprom_regulatory != 0)
75 turbo_scanoff = true;
76
77 if (mac->act_scanning) {
78 tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
79 tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
80
81 if (turbo_scanoff) {
82 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
83 tx_agc[idx1] = ppowerlevel[idx1] |
84 (ppowerlevel[idx1] << 8) |
85 (ppowerlevel[idx1] << 16) |
86 (ppowerlevel[idx1] << 24);
87 }
88 }
89 } else {
90 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
91 tx_agc[idx1] = ppowerlevel[idx1] |
92 (ppowerlevel[idx1] << 8) |
93 (ppowerlevel[idx1] << 16) |
94 (ppowerlevel[idx1] << 24);
95 }
96 if (rtlefuse->eeprom_regulatory == 0) {
97 tmpval =
98 (rtlphy->mcs_offset[0][6]) +
99 (rtlphy->mcs_offset[0][7] << 8);
100 tx_agc[RF90_PATH_A] += tmpval;
101
102 tmpval = (rtlphy->mcs_offset[0][14]) +
103 (rtlphy->mcs_offset[0][15] <<
104 24);
105 tx_agc[RF90_PATH_B] += tmpval;
106 }
107 }
108 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
109 ptr = (u8 *)(&(tx_agc[idx1]));
110 for (idx2 = 0; idx2 < 4; idx2++) {
111 if (*ptr > RF6052_MAX_TX_PWR)
112 *ptr = RF6052_MAX_TX_PWR;
113 ptr++;
114 }
115 }
116 rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
117 if (direction == 1) {
118 tx_agc[0] += pwrtrac_value;
119 tx_agc[1] += pwrtrac_value;
120 } else if (direction == 2) {
121 tx_agc[0] -= pwrtrac_value;
122 tx_agc[1] -= pwrtrac_value;
123 }
124 tmpval = tx_agc[RF90_PATH_A] & 0xff;
125 rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
126
127 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
128 "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
129 RTXAGC_A_CCK1_MCS32);
130
131 tmpval = tx_agc[RF90_PATH_A] >> 8;
132
133 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
134
135 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
136 "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
137 RTXAGC_B_CCK11_A_CCK2_11);
138
139 tmpval = tx_agc[RF90_PATH_B] >> 24;
140 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
141
142 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
143 "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
144 RTXAGC_B_CCK11_A_CCK2_11);
145
146 tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
147 rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
148
149 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
150 "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
151 RTXAGC_B_CCK1_55_MCS32);
152}
153
154static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw,
155 u8 *ppowerlevel_ofdm,
156 u8 *ppowerlevel_bw20,
157 u8 *ppowerlevel_bw40,
158 u8 channel, u32 *ofdmbase,
159 u32 *mcsbase)
160{
161 struct rtl_priv *rtlpriv = rtl_priv(hw);
162 struct rtl_phy *rtlphy = &(rtlpriv->phy);
163 u32 powerbase0, powerbase1;
164 u8 i, powerlevel[2];
165
166 for (i = 0; i < 2; i++) {
167 powerbase0 = ppowerlevel_ofdm[i];
168
169 powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
170 (powerbase0 << 8) | powerbase0;
171 *(ofdmbase + i) = powerbase0;
172 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
173 " [OFDM power base index rf(%c) = 0x%x]\n",
174 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
175 }
176
177 for (i = 0; i < 2; i++) {
178 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
179 powerlevel[i] = ppowerlevel_bw20[i];
180 else
181 powerlevel[i] = ppowerlevel_bw40[i];
182 powerbase1 = powerlevel[i];
183 powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
184 (powerbase1 << 8) | powerbase1;
185
186 *(mcsbase + i) = powerbase1;
187
188 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
189 " [MCS power base index rf(%c) = 0x%x]\n",
190 ((i == 0) ? 'A' : 'B'), *(mcsbase + i));
191 }
192}
193
194static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index,
195 u32 *powerbase0, u32 *powerbase1,
196 u32 *p_outwriteval)
197{
198 struct rtl_priv *rtlpriv = rtl_priv(hw);
199 struct rtl_phy *rtlphy = &(rtlpriv->phy);
200 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
201 u8 i, chnlgroup = 0, pwr_diff_limit[4];
202 u8 pwr_diff = 0, customer_pwr_diff;
203 u32 writeval, customer_limit, rf;
204
205 for (rf = 0; rf < 2; rf++) {
206 switch (rtlefuse->eeprom_regulatory) {
207 case 0:
208 chnlgroup = 0;
209
210 writeval =
211 rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)]
212 + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
213
214 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
215 "RTK better performance, "
216 "writeval(%c) = 0x%x\n",
217 ((rf == 0) ? 'A' : 'B'), writeval);
218 break;
219 case 1:
220 if (rtlphy->pwrgroup_cnt == 1) {
221 chnlgroup = 0;
222 } else {
223 if (channel < 3)
224 chnlgroup = 0;
225 else if (channel < 6)
226 chnlgroup = 1;
227 else if (channel < 9)
228 chnlgroup = 2;
229 else if (channel < 12)
230 chnlgroup = 3;
231 else if (channel < 14)
232 chnlgroup = 4;
233 else if (channel == 14)
234 chnlgroup = 5;
235 }
236 writeval = rtlphy->mcs_offset[chnlgroup]
237 [index + (rf ? 8 : 0)] + ((index < 2) ?
238 powerbase0[rf] :
239 powerbase1[rf]);
240
241 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
242 "Realtek regulatory, 20MHz, "
243 "writeval(%c) = 0x%x\n",
244 ((rf == 0) ? 'A' : 'B'), writeval);
245
246 break;
247 case 2:
248 writeval =
249 ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
250
251 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
252 "Better regulatory, "
253 "writeval(%c) = 0x%x\n",
254 ((rf == 0) ? 'A' : 'B'), writeval);
255 break;
256 case 3:
257 chnlgroup = 0;
258
259 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
260 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
261 "customer's limit, 40MHz "
262 "rf(%c) = 0x%x\n",
263 ((rf == 0) ? 'A' : 'B'),
264 rtlefuse->pwrgroup_ht40[rf]
265 [channel-1]);
266 } else {
267 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
268 "customer's limit, 20MHz "
269 "rf(%c) = 0x%x\n",
270 ((rf == 0) ? 'A' : 'B'),
271 rtlefuse->pwrgroup_ht20[rf]
272 [channel-1]);
273 }
274
275 if (index < 2)
276 pwr_diff =
277 rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
278 else if (rtlphy->current_chan_bw ==
279 HT_CHANNEL_WIDTH_20)
280 pwr_diff =
281 rtlefuse->txpwr_ht20diff[rf][channel-1];
282
283 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
284 customer_pwr_diff =
285 rtlefuse->pwrgroup_ht40[rf][channel-1];
286 else
287 customer_pwr_diff =
288 rtlefuse->pwrgroup_ht20[rf][channel-1];
289
290 if (pwr_diff > customer_pwr_diff)
291 pwr_diff = 0;
292 else
293 pwr_diff = customer_pwr_diff - pwr_diff;
294
295 for (i = 0; i < 4; i++) {
296 pwr_diff_limit[i] =
297 (u8)((rtlphy->mcs_offset
298 [chnlgroup][index + (rf ? 8 : 0)] &
299 (0x7f << (i * 8))) >> (i * 8));
300
301 if (pwr_diff_limit[i] > pwr_diff)
302 pwr_diff_limit[i] = pwr_diff;
303 }
304
305 customer_limit = (pwr_diff_limit[3] << 24) |
306 (pwr_diff_limit[2] << 16) |
307 (pwr_diff_limit[1] << 8) |
308 (pwr_diff_limit[0]);
309
310 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
311 "Customer's limit rf(%c) = 0x%x\n",
312 ((rf == 0) ? 'A' : 'B'), customer_limit);
313
314 writeval = customer_limit + ((index < 2) ?
315 powerbase0[rf] :
316 powerbase1[rf]);
317
318 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
319 "Customer, writeval rf(%c)= 0x%x\n",
320 ((rf == 0) ? 'A' : 'B'), writeval);
321 break;
322 default:
323 chnlgroup = 0;
324 writeval =
325 rtlphy->mcs_offset[chnlgroup]
326 [index + (rf ? 8 : 0)]
327 + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
328
329 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
330 "RTK better performance, writeval "
331 "rf(%c) = 0x%x\n",
332 ((rf == 0) ? 'A' : 'B'), writeval);
333 break;
334 }
335
336 if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
337 writeval = writeval - 0x06060606;
338 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
339 TXHIGHPWRLEVEL_BT2)
340 writeval = writeval - 0x0c0c0c0c;
341 *(p_outwriteval + rf) = writeval;
342 }
343}
344
345static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw,
346 u8 index, u32 *value)
347{
348 struct rtl_priv *rtlpriv = rtl_priv(hw);
349 u16 regoffset_a[6] = {
350 RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
351 RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
352 RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
353 };
354 u16 regoffset_b[6] = {
355 RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
356 RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
357 RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
358 };
359 u8 i, rf, pwr_val[4];
360 u32 writeval;
361 u16 regoffset;
362
363 for (rf = 0; rf < 2; rf++) {
364 writeval = value[rf];
365 for (i = 0; i < 4; i++) {
366 pwr_val[i] = (u8) ((writeval & (0x7f <<
367 (i * 8))) >> (i * 8));
368
369 if (pwr_val[i] > RF6052_MAX_TX_PWR)
370 pwr_val[i] = RF6052_MAX_TX_PWR;
371 }
372 writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
373 (pwr_val[1] << 8) | pwr_val[0];
374
375 if (rf == 0)
376 regoffset = regoffset_a[index];
377 else
378 regoffset = regoffset_b[index];
379 rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
380
381 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
382 "Set 0x%x = %08x\n", regoffset, writeval);
383 }
384}
385
386void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
387 u8 *ppowerlevel_ofdm,
388 u8 *ppowerlevel_bw20,
389 u8 *ppowerlevel_bw40, u8 channel)
390{
391 u32 writeval[2], powerbase0[2], powerbase1[2];
392 u8 index;
393 u8 direction;
394 u32 pwrtrac_value;
395
396 rtl8723be_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20,
397 ppowerlevel_bw40, channel,
398 &powerbase0[0], &powerbase1[0]);
399
400 rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
401
402 for (index = 0; index < 6; index++) {
403 txpwr_by_regulatory(hw, channel, index, &powerbase0[0],
404 &powerbase1[0], &writeval[0]);
405 if (direction == 1) {
406 writeval[0] += pwrtrac_value;
407 writeval[1] += pwrtrac_value;
408 } else if (direction == 2) {
409 writeval[0] -= pwrtrac_value;
410 writeval[1] -= pwrtrac_value;
411 }
412 _rtl8723be_write_ofdm_power_reg(hw, index, &writeval[0]);
413 }
414}
415
416bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw)
417{
418 struct rtl_priv *rtlpriv = rtl_priv(hw);
419 struct rtl_phy *rtlphy = &(rtlpriv->phy);
420
421 if (rtlphy->rf_type == RF_1T1R)
422 rtlphy->num_total_rfpath = 1;
423 else
424 rtlphy->num_total_rfpath = 2;
425
426 return _rtl8723be_phy_rf6052_config_parafile(hw);
427}
428
429static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
430{
431 struct rtl_priv *rtlpriv = rtl_priv(hw);
432 struct rtl_phy *rtlphy = &(rtlpriv->phy);
433 struct bb_reg_def *pphyreg;
434 u32 u4_regvalue = 0;
435 u8 rfpath;
436 bool rtstatus = true;
437
438 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
439 pphyreg = &rtlphy->phyreg_def[rfpath];
440
441 switch (rfpath) {
442 case RF90_PATH_A:
443 case RF90_PATH_C:
444 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
445 BRFSI_RFENV);
446 break;
447 case RF90_PATH_B:
448 case RF90_PATH_D:
449 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
450 BRFSI_RFENV << 16);
451 break;
452 }
453
454 rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
455 udelay(1);
456
457 rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
458 udelay(1);
459
460 rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
461 B3WIREADDREAALENGTH, 0x0);
462 udelay(1);
463
464 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
465 udelay(1);
466
467 switch (rfpath) {
468 case RF90_PATH_A:
469 rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
470 (enum radio_path)rfpath);
471 break;
472 case RF90_PATH_B:
473 rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
474 (enum radio_path)rfpath);
475 break;
476 case RF90_PATH_C:
477 break;
478 case RF90_PATH_D:
479 break;
480 }
481
482 switch (rfpath) {
483 case RF90_PATH_A:
484 case RF90_PATH_C:
485 rtl_set_bbreg(hw, pphyreg->rfintfs,
486 BRFSI_RFENV, u4_regvalue);
487 break;
488 case RF90_PATH_B:
489 case RF90_PATH_D:
490 rtl_set_bbreg(hw, pphyreg->rfintfs,
491 BRFSI_RFENV << 16, u4_regvalue);
492 break;
493 }
494
495 if (!rtstatus) {
496 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
497 "Radio[%d] Fail!!", rfpath);
498 return false;
499 }
500 }
501
502 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
503 return rtstatus;
504}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
new file mode 100644
index 000000000000..a6fea106ced4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
@@ -0,0 +1,43 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
#ifndef __RTL8723BE_RF_H__
#define __RTL8723BE_RF_H__

/* Maximum value of a per-rate TX power index field (6 bits). */
#define RF6052_MAX_TX_PWR 0x3F
/* Highest addressable RF6052 register. */
#define RF6052_MAX_REG 0x3F

/* Select 20 or 40 MHz RF channel bandwidth. */
void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
					u8 bandwidth);
/* Write CCK-rate TX power levels; ppowerlevel is indexed by RF path. */
void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
					  u8 *ppowerlevel);
/* Write OFDM/MCS TX power levels for the given channel; the level
 * arrays are indexed by RF path. */
void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
					   u8 *ppowerlevel_ofdm,
					   u8 *ppowerlevel_bw20,
					   u8 *ppowerlevel_bw40,
					   u8 channel);
/* Load the RF6052 register tables; returns false on failure. */
bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
new file mode 100644
index 000000000000..b4577ebc4bb0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -0,0 +1,384 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../core.h"
28#include "../pci.h"
29#include "reg.h"
30#include "def.h"
31#include "phy.h"
32#include "../rtl8723com/phy_common.h"
33#include "dm.h"
34#include "hw.h"
35#include "fw.h"
36#include "../rtl8723com/fw_common.h"
37#include "sw.h"
38#include "trx.h"
39#include "led.h"
40#include "table.h"
41#include "../btcoexist/rtl_btc.h"
42
43#include <linux/vmalloc.h>
44#include <linux/module.h>
45
46static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
47{
48 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
49
50 /*close ASPM for AMD defaultly */
51 rtlpci->const_amdpci_aspm = 0;
52
53 /* ASPM PS mode.
54 * 0 - Disable ASPM,
55 * 1 - Enable ASPM without Clock Req,
56 * 2 - Enable ASPM with Clock Req,
57 * 3 - Alwyas Enable ASPM with Clock Req,
58 * 4 - Always Enable ASPM without Clock Req.
59 * set defult to RTL8192CE:3 RTL8192E:2
60 */
61 rtlpci->const_pci_aspm = 3;
62
63 /*Setting for PCI-E device */
64 rtlpci->const_devicepci_aspm_setting = 0x03;
65
66 /*Setting for PCI-E bridge */
67 rtlpci->const_hostpci_aspm_setting = 0x02;
68
69 /* In Hw/Sw Radio Off situation.
70 * 0 - Default,
71 * 1 - From ASPM setting without low Mac Pwr,
72 * 2 - From ASPM setting with low Mac Pwr,
73 * 3 - Bus D3
74 * set default to RTL8192CE:0 RTL8192SE:2
75 */
76 rtlpci->const_hwsw_rfoff_d3 = 0;
77
78 /* This setting works for those device with
79 * backdoor ASPM setting such as EPHY setting.
80 * 0 - Not support ASPM,
81 * 1 - Support ASPM,
82 * 2 - According to chipset.
83 */
84 rtlpci->const_support_pciaspm = 1;
85}
86
87int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
88{
89 int err = 0;
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
92 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
93
94 rtl8723be_bt_reg_init(hw);
95 rtlpci->msi_support = true;
96 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
97
98 rtlpriv->dm.dm_initialgain_enable = 1;
99 rtlpriv->dm.dm_flag = 0;
100 rtlpriv->dm.disable_framebursting = 0;
101 rtlpriv->dm.thermalvalue = 0;
102 rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
103
104 mac->ht_enable = true;
105
106 /* compatible 5G band 88ce just 2.4G band & smsp */
107 rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
108 rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
109 rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
110
111 rtlpci->receive_config = (RCR_APPFCS |
112 RCR_APP_MIC |
113 RCR_APP_ICV |
114 RCR_APP_PHYST_RXFF |
115 RCR_HTC_LOC_CTRL |
116 RCR_AMF |
117 RCR_ACF |
118 RCR_ADF |
119 RCR_AICV |
120 RCR_AB |
121 RCR_AM |
122 RCR_APM |
123 0);
124
125 rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT |
126 IMR_HSISR_IND_ON_INT |
127 IMR_C2HCMD |
128 IMR_HIGHDOK |
129 IMR_MGNTDOK |
130 IMR_BKDOK |
131 IMR_BEDOK |
132 IMR_VIDOK |
133 IMR_VODOK |
134 IMR_RDU |
135 IMR_ROK |
136 0);
137
138 rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
139
140 /* for debug level */
141 rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
142 /* for LPS & IPS */
143 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
144 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
145 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
146 rtlpriv->psc.reg_fwctrl_lps = 3;
147 rtlpriv->psc.reg_max_lps_awakeintvl = 5;
148 /* for ASPM, you can close aspm through
149 * set const_support_pciaspm = 0
150 */
151 rtl8723be_init_aspm_vars(hw);
152
153 if (rtlpriv->psc.reg_fwctrl_lps == 1)
154 rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
155 else if (rtlpriv->psc.reg_fwctrl_lps == 2)
156 rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
157 else if (rtlpriv->psc.reg_fwctrl_lps == 3)
158 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
159
160 /* for firmware buf */
161 rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
162 if (!rtlpriv->rtlhal.pfirmware) {
163 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
164 "Can't alloc buffer for fw.\n");
165 return 1;
166 }
167
168 rtlpriv->max_fw_size = 0x8000;
169 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
170 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
171 rtlpriv->io.dev, GFP_KERNEL, hw,
172 rtl_fw_cb);
173 if (err) {
174 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
175 "Failed to request firmware!\n");
176 return 1;
177 }
178 return 0;
179}
180
181void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
182{
183 struct rtl_priv *rtlpriv = rtl_priv(hw);
184
185 if (rtlpriv->cfg->ops->get_btc_status())
186 rtlpriv->btcoexist.btc_ops->btc_halt_notify();
187 if (rtlpriv->rtlhal.pfirmware) {
188 vfree(rtlpriv->rtlhal.pfirmware);
189 rtlpriv->rtlhal.pfirmware = NULL;
190 }
191}
192
193/* get bt coexist status */
/* BT-coexist status callback: the 8723BE always uses BT-coexist
 * handling (see the rtl8723be_bt_reg_init()/btc_ops setup in
 * rtl8723be_init_sw_vars()), so this unconditionally reports true.
 */
bool rtl8723be_get_btc_status(void)
{
	return true;
}
198
199static bool is_fw_header(struct rtl92c_firmware_header *hdr)
200{
201 return (hdr->signature & 0xfff0) == 0x5300;
202}
203
/* HAL callback table wiring the generic rtlwifi core to the
 * RTL8723BE-specific implementations.  The get_bbreg/set_bbreg and
 * fill-firmware helpers come from the shared rtl8723com code.
 */
static struct rtl_hal_ops rtl8723be_hal_ops = {
	.init_sw_vars = rtl8723be_init_sw_vars,
	.deinit_sw_vars = rtl8723be_deinit_sw_vars,
	.read_eeprom_info = rtl8723be_read_eeprom_info,
	.interrupt_recognized = rtl8723be_interrupt_recognized,
	.hw_init = rtl8723be_hw_init,
	.hw_disable = rtl8723be_card_disable,
	.hw_suspend = rtl8723be_suspend,
	.hw_resume = rtl8723be_resume,
	.enable_interrupt = rtl8723be_enable_interrupt,
	.disable_interrupt = rtl8723be_disable_interrupt,
	.set_network_type = rtl8723be_set_network_type,
	.set_chk_bssid = rtl8723be_set_check_bssid,
	.set_qos = rtl8723be_set_qos,
	.set_bcn_reg = rtl8723be_set_beacon_related_registers,
	.set_bcn_intv = rtl8723be_set_beacon_interval,
	.update_interrupt_mask = rtl8723be_update_interrupt_mask,
	.get_hw_reg = rtl8723be_get_hw_reg,
	.set_hw_reg = rtl8723be_set_hw_reg,
	.update_rate_tbl = rtl8723be_update_hal_rate_tbl,
	.fill_tx_desc = rtl8723be_tx_fill_desc,
	.fill_tx_cmddesc = rtl8723be_tx_fill_cmddesc,
	.query_rx_desc = rtl8723be_rx_query_desc,
	.set_channel_access = rtl8723be_update_channel_access_setting,
	.radio_onoff_checking = rtl8723be_gpio_radio_on_off_checking,
	.set_bw_mode = rtl8723be_phy_set_bw_mode,
	.switch_channel = rtl8723be_phy_sw_chnl,
	.dm_watchdog = rtl8723be_dm_watchdog,
	.scan_operation_backup = rtl8723be_phy_scan_operation_backup,
	.set_rf_power_state = rtl8723be_phy_set_rf_power_state,
	.led_control = rtl8723be_led_control,
	.set_desc = rtl8723be_set_desc,
	.get_desc = rtl8723be_get_desc,
	.is_tx_desc_closed = rtl8723be_is_tx_desc_closed,
	.tx_polling = rtl8723be_tx_polling,
	.enable_hw_sec = rtl8723be_enable_hw_security_config,
	.set_key = rtl8723be_set_key,
	.init_sw_leds = rtl8723be_init_sw_leds,
	/* BB register access is shared with the other 8723-family parts. */
	.get_bbreg = rtl8723_phy_query_bb_reg,
	.set_bbreg = rtl8723_phy_set_bb_reg,
	.get_rfreg = rtl8723be_phy_query_rf_reg,
	.set_rfreg = rtl8723be_phy_set_rf_reg,
	.fill_h2c_cmd = rtl8723be_fill_h2c_cmd,
	.get_btc_status = rtl8723be_get_btc_status,
	.is_fw_header = is_fw_header,
};
250
/* Default values for the module parameters declared below
 * (swenc/ips/swlps/fwlps/debug); overridable at load time.
 */
static struct rtl_mod_params rtl8723be_mod_params = {
	.sw_crypto = false,	/* hardware encryption by default */
	.inactiveps = true,	/* inactive power save (IPS) enabled */
	.swctrl_lps = false,	/* software-controlled LPS disabled */
	.fwctrl_lps = true,	/* firmware-controlled LPS enabled */
	.debug = DBG_EMERG,	/* lowest debug verbosity */
};
258
/* Static HAL configuration: PCI BAR, firmware image name, the ops table
 * above, and the register/bit map translating generic rtlwifi map
 * indices to RTL8723BE register and descriptor constants.
 */
static struct rtl_hal_cfg rtl8723be_hal_cfg = {
	.bar_id = 2,
	.write_readback = true,
	.name = "rtl8723be_pci",
	.fw_name = "rtlwifi/rtl8723befw.bin",
	.ops = &rtl8723be_hal_ops,
	.mod_params = &rtl8723be_mod_params,
	/* System control and RCR (receive configuration) bits. */
	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
	.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
	.maps[SYS_CLK] = REG_SYS_CLKR,
	.maps[MAC_RCR_AM] = AM,
	.maps[MAC_RCR_AB] = AB,
	.maps[MAC_RCR_ACRC32] = ACRC32,
	.maps[MAC_RCR_ACF] = ACF,
	.maps[MAC_RCR_AAP] = AAP,

	/* EFUSE (on-chip calibration/config memory) access. */
	.maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,

	.maps[EFUSE_TEST] = REG_EFUSE_TEST,
	.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_CLK] = 0,
	.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
	.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
	.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
	.maps[EFUSE_ANA8M] = ANA8M,
	.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
	.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
	.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
	.maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,

	/* CAM (hardware key cache) and security registers. */
	.maps[RWCAM] = REG_CAMCMD,
	.maps[WCAMI] = REG_CAMWRITE,
	.maps[RCAMO] = REG_CAMREAD,
	.maps[CAMDBG] = REG_CAMDBG,
	.maps[SECR] = REG_SECCFG,
	.maps[SEC_CAM_NONE] = CAM_NONE,
	.maps[SEC_CAM_WEP40] = CAM_WEP40,
	.maps[SEC_CAM_TKIP] = CAM_TKIP,
	.maps[SEC_CAM_AES] = CAM_AES,
	.maps[SEC_CAM_WEP104] = CAM_WEP104,

	/* Interrupt mask bits. */
	.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
	.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
	.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
	.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
	.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
	.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
	.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
	.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
	.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
	.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
	.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
	.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
	.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,

	.maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
	.maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
	.maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
	.maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
	.maps[RTL_IMR_RDU] = IMR_RDU,
	.maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
	.maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
	.maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
	.maps[RTL_IMR_TBDER] = IMR_TBDER,
	.maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
	.maps[RTL_IMR_TBDOK] = IMR_TBDOK,
	.maps[RTL_IMR_BKDOK] = IMR_BKDOK,
	.maps[RTL_IMR_BEDOK] = IMR_BEDOK,
	.maps[RTL_IMR_VIDOK] = IMR_VIDOK,
	.maps[RTL_IMR_VODOK] = IMR_VODOK,
	.maps[RTL_IMR_ROK] = IMR_ROK,
	.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),

	/* Rate descriptor values (CCK, OFDM, HT MCS bounds). */
	.maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
	.maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
	.maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
	.maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
	.maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
	.maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
	.maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
	.maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
	.maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
	.maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
	.maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
	.maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,

	.maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
	.maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
};
349
350static DEFINE_PCI_DEVICE_TABLE(rtl8723be_pci_id) = {
351 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)},
352 {},
353};
354
355MODULE_DEVICE_TABLE(pci, rtl8723be_pci_id);
356
357MODULE_AUTHOR("PageHe <page_he@realsil.com.cn>");
358MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
359MODULE_LICENSE("GPL");
360MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
361MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
362
363module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
364module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
365module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
366module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
367module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
368MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
369MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
370MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
371MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
372
/* Standard rtlwifi PCI suspend/resume hooks shared by all rtlwifi
 * PCI drivers.
 */
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);

static struct pci_driver rtl8723be_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtl8723be_pci_id,
	.probe = rtl_pci_probe,		/* generic rtlwifi PCI probe */
	.remove = rtl_pci_disconnect,	/* generic rtlwifi PCI remove */

	.driver.pm = &rtlwifi_pm_ops,
};

module_pci_driver(rtl8723be_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
new file mode 100644
index 000000000000..a7b25e769950
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
@@ -0,0 +1,35 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_SW_H__
27#define __RTL8723BE_SW_H__
28
29int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
30void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
31void rtl8723be_init_var_map(struct ieee80211_hw *hw);
32bool rtl8723be_get_btc_status(void);
33
34
35#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
new file mode 100644
index 000000000000..4b283cde042e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
@@ -0,0 +1,572 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Created on 2010/ 5/18, 1:41
23 *
24 * Larry Finger <Larry.Finger@lwfinger.net>
25 *
26 *****************************************************************************/
27
28#include "table.h"
/* Baseband (BB) PHY register initialization table.
 * Flat list of {register offset, 32-bit value} pairs; length must stay
 * in sync with RTL8723BEPHY_REG_1TARRAYLEN (388 = 194 pairs) in table.h.
 * Values are vendor-supplied chip calibration data -- do not edit.
 */
u32 RTL8723BEPHY_REG_1TARRAY[] = {
	0x800, 0x80040000,
	0x804, 0x00000003,
	0x808, 0x0000FC00,
	0x80C, 0x0000000A,
	0x810, 0x10001331,
	0x814, 0x020C3D10,
	0x818, 0x02200385,
	0x81C, 0x00000000,
	0x820, 0x01000100,
	0x824, 0x00390204,
	0x828, 0x00000000,
	0x82C, 0x00000000,
	0x830, 0x00000000,
	0x834, 0x00000000,
	0x838, 0x00000000,
	0x83C, 0x00000000,
	0x840, 0x00010000,
	0x844, 0x00000000,
	0x848, 0x00000000,
	0x84C, 0x00000000,
	0x850, 0x00000000,
	0x854, 0x00000000,
	0x858, 0x569A11A9,
	0x85C, 0x01000014,
	0x860, 0x66F60110,
	0x864, 0x061F0649,
	0x868, 0x00000000,
	0x86C, 0x27272700,
	0x870, 0x07000760,
	0x874, 0x25004000,
	0x878, 0x00000808,
	0x87C, 0x00000000,
	0x880, 0xB0000C1C,
	0x884, 0x00000001,
	0x888, 0x00000000,
	0x88C, 0xCCC000C0,
	0x890, 0x00000800,
	0x894, 0xFFFFFFFE,
	0x898, 0x40302010,
	0x89C, 0x00706050,
	0x900, 0x00000000,
	0x904, 0x00000023,
	0x908, 0x00000000,
	0x90C, 0x81121111,
	0x910, 0x00000002,
	0x914, 0x00000201,
	0x948, 0x00000000,
	0xA00, 0x00D047C8,
	0xA04, 0x80FF000C,
	0xA08, 0x8C838300,
	0xA0C, 0x2E7F120F,
	0xA10, 0x9500BB78,
	0xA14, 0x1114D028,
	0xA18, 0x00881117,
	0xA1C, 0x89140F00,
	0xA20, 0x1A1B0000,
	0xA24, 0x090E1317,
	0xA28, 0x00000204,
	0xA2C, 0x00D30000,
	0xA70, 0x101FBF00,
	0xA74, 0x00000007,
	0xA78, 0x00000900,
	0xA7C, 0x225B0606,
	0xA80, 0x21806490,
	0xB2C, 0x00000000,
	0xC00, 0x48071D40,
	0xC04, 0x03A05611,
	0xC08, 0x000000E4,
	0xC0C, 0x6C6C6C6C,
	0xC10, 0x08800000,
	0xC14, 0x40000100,
	0xC18, 0x08800000,
	0xC1C, 0x40000100,
	0xC20, 0x00000000,
	0xC24, 0x00000000,
	0xC28, 0x00000000,
	0xC2C, 0x00000000,
	0xC30, 0x69E9AC44,
	0xC34, 0x469652AF,
	0xC38, 0x49795994,
	0xC3C, 0x0A97971C,
	0xC40, 0x1F7C403F,
	0xC44, 0x000100B7,
	0xC48, 0xEC020107,
	0xC4C, 0x007F037F,
	0xC50, 0x69553420,
	0xC54, 0x43BC0094,
	0xC58, 0x00023169,
	0xC5C, 0x00250492,
	0xC60, 0x00000000,
	0xC64, 0x7112848B,
	0xC68, 0x47C00BFF,
	0xC6C, 0x00000036,
	0xC70, 0x2C7F000D,
	0xC74, 0x020610DB,
	0xC78, 0x0000001F,
	0xC7C, 0x00B91612,
	0xC80, 0x390000E4,
	0xC84, 0x20F60000,
	0xC88, 0x40000100,
	0xC8C, 0x20200000,
	0xC90, 0x00020E1A,
	0xC94, 0x00000000,
	0xC98, 0x00020E1A,
	0xC9C, 0x00007F7F,
	0xCA0, 0x00000000,
	0xCA4, 0x000300A0,
	0xCA8, 0x00000000,
	0xCAC, 0x00000000,
	0xCB0, 0x00000000,
	0xCB4, 0x00000000,
	0xCB8, 0x00000000,
	0xCBC, 0x28000000,
	0xCC0, 0x00000000,
	0xCC4, 0x00000000,
	0xCC8, 0x00000000,
	0xCCC, 0x00000000,
	0xCD0, 0x00000000,
	0xCD4, 0x00000000,
	0xCD8, 0x64B22427,
	0xCDC, 0x00766932,
	0xCE0, 0x00222222,
	0xCE4, 0x00000000,
	0xCE8, 0x37644302,
	0xCEC, 0x2F97D40C,
	0xD00, 0x00000740,
	0xD04, 0x40020401,
	0xD08, 0x0000907F,
	0xD0C, 0x20010201,
	0xD10, 0xA0633333,
	0xD14, 0x3333BC53,
	0xD18, 0x7A8F5B6F,
	0xD2C, 0xCC979975,
	0xD30, 0x00000000,
	0xD34, 0x80608000,
	0xD38, 0x00000000,
	0xD3C, 0x00127353,
	0xD40, 0x00000000,
	0xD44, 0x00000000,
	0xD48, 0x00000000,
	0xD4C, 0x00000000,
	0xD50, 0x6437140A,
	0xD54, 0x00000000,
	0xD58, 0x00000282,
	0xD5C, 0x30032064,
	0xD60, 0x4653DE68,
	0xD64, 0x04518A3C,
	0xD68, 0x00002101,
	0xD6C, 0x2A201C16,
	0xD70, 0x1812362E,
	0xD74, 0x322C2220,
	0xD78, 0x000E3C24,
	0xE00, 0x2D2D2D2D,
	0xE04, 0x2D2D2D2D,
	0xE08, 0x0390272D,
	0xE10, 0x2D2D2D2D,
	0xE14, 0x2D2D2D2D,
	0xE18, 0x2D2D2D2D,
	0xE1C, 0x2D2D2D2D,
	0xE28, 0x00000000,
	0xE30, 0x1000DC1F,
	0xE34, 0x10008C1F,
	0xE38, 0x02140102,
	0xE3C, 0x681604C2,
	0xE40, 0x01007C00,
	0xE44, 0x01004800,
	0xE48, 0xFB000000,
	0xE4C, 0x000028D1,
	0xE50, 0x1000DC1F,
	0xE54, 0x10008C1F,
	0xE58, 0x02140102,
	0xE5C, 0x28160D05,
	0xE60, 0x00000008,
	0xE68, 0x001B2556,
	0xE6C, 0x00C00096,
	0xE70, 0x00C00096,
	0xE74, 0x01000056,
	0xE78, 0x01000014,
	0xE7C, 0x01000056,
	0xE80, 0x01000014,
	0xE84, 0x00C00096,
	0xE88, 0x01000056,
	0xE8C, 0x00C00096,
	0xED0, 0x00C00096,
	0xED4, 0x00C00096,
	0xED8, 0x00C00096,
	0xEDC, 0x000000D6,
	0xEE0, 0x000000D6,
	0xEEC, 0x01C00016,
	0xF14, 0x00000003,
	0xF4C, 0x00000000,
	0xF00, 0x00000300,
	0x820, 0x01000100,
	0x800, 0x83040000,
};
225
/* PHY power-grade (PG) table, 6 values per row.  The trailing triple is
 * {register, bitmask, value}; the meaning of the three leading zeros is
 * presumably {band, RF path, TX num} selectors as in other rtlwifi PG
 * tables -- TODO confirm against the PG parsing code in phy.c.
 * Length must match RTL8723BEPHY_REG_ARRAY_PGLEN (36) in table.h.
 */
u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
	0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000,
	0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800,
	0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646,
	0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840,
	0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
	0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
};
234
/* RF (radio path A) initialization table: {RF register, value} pairs.
 * Length must stay in sync with RTL8723BE_RADIOA_1TARRAYLEN (206 =
 * 103 pairs) in table.h.  Vendor-supplied calibration data.
 */
u32 RTL8723BE_RADIOA_1TARRAY[] = {
	0x000, 0x00010000,
	0x0B0, 0x000DFFE0,
	0x0FE, 0x00000000,
	0x0FE, 0x00000000,
	0x0FE, 0x00000000,
	0x0B1, 0x00000018,
	0x0FE, 0x00000000,
	0x0FE, 0x00000000,
	0x0FE, 0x00000000,
	0x0B2, 0x00084C00,
	0x0B5, 0x0000D2CC,
	0x0B6, 0x000925AA,
	0x0B7, 0x00000010,
	0x0B8, 0x0000907F,
	0x05C, 0x00000002,
	0x07C, 0x00000002,
	0x07E, 0x00000005,
	0x08B, 0x0006FC00,
	0x0B0, 0x000FF9F0,
	0x01C, 0x000739D2,
	0x01E, 0x00000000,
	0x0DF, 0x00000780,
	0x050, 0x00067435,
	0x051, 0x0006B04E,
	0x052, 0x000007D2,
	0x053, 0x00000000,
	0x054, 0x00050400,
	0x055, 0x0004026E,
	0x0DD, 0x0000004C,
	0x070, 0x00067435,
	0x071, 0x0006B04E,
	0x072, 0x000007D2,
	0x073, 0x00000000,
	0x074, 0x00050400,
	0x075, 0x0004026E,
	0x0EF, 0x00000100,
	0x034, 0x0000ADD7,
	0x035, 0x00005C00,
	0x034, 0x00009DD4,
	0x035, 0x00005000,
	0x034, 0x00008DD1,
	0x035, 0x00004400,
	0x034, 0x00007DCE,
	0x035, 0x00003800,
	0x034, 0x00006CD1,
	0x035, 0x00004400,
	0x034, 0x00005CCE,
	0x035, 0x00003800,
	0x034, 0x000048CE,
	0x035, 0x00004400,
	0x034, 0x000034CE,
	0x035, 0x00003800,
	0x034, 0x00002451,
	0x035, 0x00004400,
	0x034, 0x0000144E,
	0x035, 0x00003800,
	0x034, 0x00000051,
	0x035, 0x00004400,
	0x0EF, 0x00000000,
	0x0EF, 0x00000100,
	0x0ED, 0x00000010,
	0x044, 0x0000ADD7,
	0x044, 0x00009DD4,
	0x044, 0x00008DD1,
	0x044, 0x00007DCE,
	0x044, 0x00006CC1,
	0x044, 0x00005CCE,
	0x044, 0x000044D1,
	0x044, 0x000034CE,
	0x044, 0x00002451,
	0x044, 0x0000144E,
	0x044, 0x00000051,
	0x0EF, 0x00000000,
	0x0ED, 0x00000000,
	0x0EF, 0x00002000,
	0x03B, 0x000380EF,
	0x03B, 0x000302FE,
	0x03B, 0x00028CE6,
	0x03B, 0x000200BC,
	0x03B, 0x000188A5,
	0x03B, 0x00010FBC,
	0x03B, 0x00008F71,
	0x03B, 0x00000900,
	0x0EF, 0x00000000,
	0x0ED, 0x00000001,
	0x040, 0x000380EF,
	0x040, 0x000302FE,
	0x040, 0x00028CE6,
	0x040, 0x000200BC,
	0x040, 0x000188A5,
	0x040, 0x00010FBC,
	0x040, 0x00008F71,
	0x040, 0x00000900,
	0x0ED, 0x00000000,
	0x082, 0x00080000,
	0x083, 0x00008000,
	0x084, 0x00048D80,
	0x085, 0x00068000,
	0x0A2, 0x00080000,
	0x0A3, 0x00008000,
	0x0A4, 0x00048D80,
	0x0A5, 0x00068000,
	0x000, 0x00033D80,
};
340
/* MAC register initialization table: {register, value} pairs.
 * Length must stay in sync with RTL8723BEMAC_1T_ARRAYLEN (194 =
 * 97 pairs) in table.h.  Vendor-supplied configuration data.
 */
u32 RTL8723BEMAC_1T_ARRAY[] = {
	0x02F, 0x00000030,
	0x035, 0x00000000,
	0x428, 0x0000000A,
	0x429, 0x00000010,
	0x430, 0x00000000,
	0x431, 0x00000000,
	0x432, 0x00000000,
	0x433, 0x00000001,
	0x434, 0x00000004,
	0x435, 0x00000005,
	0x436, 0x00000007,
	0x437, 0x00000008,
	0x43C, 0x00000004,
	0x43D, 0x00000005,
	0x43E, 0x00000007,
	0x43F, 0x00000008,
	0x440, 0x0000005D,
	0x441, 0x00000001,
	0x442, 0x00000000,
	0x444, 0x00000010,
	0x445, 0x00000000,
	0x446, 0x00000000,
	0x447, 0x00000000,
	0x448, 0x00000000,
	0x449, 0x000000F0,
	0x44A, 0x0000000F,
	0x44B, 0x0000003E,
	0x44C, 0x00000010,
	0x44D, 0x00000000,
	0x44E, 0x00000000,
	0x44F, 0x00000000,
	0x450, 0x00000000,
	0x451, 0x000000F0,
	0x452, 0x0000000F,
	0x453, 0x00000000,
	0x456, 0x0000005E,
	0x460, 0x00000066,
	0x461, 0x00000066,
	0x4C8, 0x000000FF,
	0x4C9, 0x00000008,
	0x4CC, 0x000000FF,
	0x4CD, 0x000000FF,
	0x4CE, 0x00000001,
	0x500, 0x00000026,
	0x501, 0x000000A2,
	0x502, 0x0000002F,
	0x503, 0x00000000,
	0x504, 0x00000028,
	0x505, 0x000000A3,
	0x506, 0x0000005E,
	0x507, 0x00000000,
	0x508, 0x0000002B,
	0x509, 0x000000A4,
	0x50A, 0x0000005E,
	0x50B, 0x00000000,
	0x50C, 0x0000004F,
	0x50D, 0x000000A4,
	0x50E, 0x00000000,
	0x50F, 0x00000000,
	0x512, 0x0000001C,
	0x514, 0x0000000A,
	0x516, 0x0000000A,
	0x525, 0x0000004F,
	0x550, 0x00000010,
	0x551, 0x00000010,
	0x559, 0x00000002,
	0x55C, 0x00000050,
	0x55D, 0x000000FF,
	0x605, 0x00000030,
	0x608, 0x0000000E,
	0x609, 0x0000002A,
	0x620, 0x000000FF,
	0x621, 0x000000FF,
	0x622, 0x000000FF,
	0x623, 0x000000FF,
	0x624, 0x000000FF,
	0x625, 0x000000FF,
	0x626, 0x000000FF,
	0x627, 0x000000FF,
	0x638, 0x00000050,
	0x63C, 0x0000000A,
	0x63D, 0x0000000A,
	0x63E, 0x0000000E,
	0x63F, 0x0000000E,
	0x640, 0x00000040,
	0x642, 0x00000040,
	0x643, 0x00000000,
	0x652, 0x000000C8,
	0x66E, 0x00000005,
	0x700, 0x00000021,
	0x701, 0x00000043,
	0x702, 0x00000065,
	0x703, 0x00000087,
	0x708, 0x00000021,
	0x709, 0x00000043,
	0x70A, 0x00000065,
	0x70B, 0x00000087,
};
440
/* AGC (automatic gain control) table: {register, value} pairs, almost
 * all writes to BB register 0xC78.  Length must stay in sync with
 * RTL8723BEAGCTAB_1TARRAYLEN (260 = 130 pairs) in table.h.
 * Vendor-supplied calibration data.
 */
u32 RTL8723BEAGCTAB_1TARRAY[] = {
	0xC78, 0xFD000001,
	0xC78, 0xFC010001,
	0xC78, 0xFB020001,
	0xC78, 0xFA030001,
	0xC78, 0xF9040001,
	0xC78, 0xF8050001,
	0xC78, 0xF7060001,
	0xC78, 0xF6070001,
	0xC78, 0xF5080001,
	0xC78, 0xF4090001,
	0xC78, 0xF30A0001,
	0xC78, 0xF20B0001,
	0xC78, 0xF10C0001,
	0xC78, 0xF00D0001,
	0xC78, 0xEF0E0001,
	0xC78, 0xEE0F0001,
	0xC78, 0xED100001,
	0xC78, 0xEC110001,
	0xC78, 0xEB120001,
	0xC78, 0xEA130001,
	0xC78, 0xE9140001,
	0xC78, 0xE8150001,
	0xC78, 0xE7160001,
	0xC78, 0xAA170001,
	0xC78, 0xA9180001,
	0xC78, 0xA8190001,
	0xC78, 0xA71A0001,
	0xC78, 0xA61B0001,
	0xC78, 0xA51C0001,
	0xC78, 0xA41D0001,
	0xC78, 0xA31E0001,
	0xC78, 0x671F0001,
	0xC78, 0x66200001,
	0xC78, 0x65210001,
	0xC78, 0x64220001,
	0xC78, 0x63230001,
	0xC78, 0x62240001,
	0xC78, 0x61250001,
	0xC78, 0x47260001,
	0xC78, 0x46270001,
	0xC78, 0x45280001,
	0xC78, 0x44290001,
	0xC78, 0x432A0001,
	0xC78, 0x422B0001,
	0xC78, 0x292C0001,
	0xC78, 0x282D0001,
	0xC78, 0x272E0001,
	0xC78, 0x262F0001,
	0xC78, 0x25300001,
	0xC78, 0x24310001,
	0xC78, 0x09320001,
	0xC78, 0x08330001,
	0xC78, 0x07340001,
	0xC78, 0x06350001,
	0xC78, 0x05360001,
	0xC78, 0x04370001,
	0xC78, 0x03380001,
	0xC78, 0x02390001,
	0xC78, 0x013A0001,
	0xC78, 0x003B0001,
	0xC78, 0x003C0001,
	0xC78, 0x003D0001,
	0xC78, 0x003E0001,
	0xC78, 0x003F0001,
	0xC78, 0xFC400001,
	0xC78, 0xFB410001,
	0xC78, 0xFA420001,
	0xC78, 0xF9430001,
	0xC78, 0xF8440001,
	0xC78, 0xF7450001,
	0xC78, 0xF6460001,
	0xC78, 0xF5470001,
	0xC78, 0xF4480001,
	0xC78, 0xF3490001,
	0xC78, 0xF24A0001,
	0xC78, 0xF14B0001,
	0xC78, 0xF04C0001,
	0xC78, 0xEF4D0001,
	0xC78, 0xEE4E0001,
	0xC78, 0xED4F0001,
	0xC78, 0xEC500001,
	0xC78, 0xEB510001,
	0xC78, 0xEA520001,
	0xC78, 0xE9530001,
	0xC78, 0xE8540001,
	0xC78, 0xE7550001,
	0xC78, 0xE6560001,
	0xC78, 0xE5570001,
	0xC78, 0xAA580001,
	0xC78, 0xA9590001,
	0xC78, 0xA85A0001,
	0xC78, 0xA75B0001,
	0xC78, 0xA65C0001,
	0xC78, 0xA55D0001,
	0xC78, 0xA45E0001,
	0xC78, 0x675F0001,
	0xC78, 0x66600001,
	0xC78, 0x65610001,
	0xC78, 0x64620001,
	0xC78, 0x63630001,
	0xC78, 0x62640001,
	0xC78, 0x61650001,
	0xC78, 0x47660001,
	0xC78, 0x46670001,
	0xC78, 0x45680001,
	0xC78, 0x44690001,
	0xC78, 0x436A0001,
	0xC78, 0x426B0001,
	0xC78, 0x296C0001,
	0xC78, 0x286D0001,
	0xC78, 0x276E0001,
	0xC78, 0x266F0001,
	0xC78, 0x25700001,
	0xC78, 0x24710001,
	0xC78, 0x09720001,
	0xC78, 0x08730001,
	0xC78, 0x07740001,
	0xC78, 0x06750001,
	0xC78, 0x05760001,
	0xC78, 0x04770001,
	0xC78, 0x03780001,
	0xC78, 0x02790001,
	0xC78, 0x017A0001,
	0xC78, 0x007B0001,
	0xC78, 0x007C0001,
	0xC78, 0x007D0001,
	0xC78, 0x007E0001,
	0xC78, 0x007F0001,
	0xC50, 0x69553422,
	0xC50, 0x69553420,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
new file mode 100644
index 000000000000..932760a84827
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
@@ -0,0 +1,43 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Created on 2010/ 5/18, 1:41
23 *
24 * Larry Finger <Larry.Finger@lwfinger.net>
25 *
26 *****************************************************************************/
27
28#ifndef __RTL8723BE_TABLE__H_
29#define __RTL8723BE_TABLE__H_
30
31#include <linux/types.h>
32#define RTL8723BEPHY_REG_1TARRAYLEN 388
33extern u32 RTL8723BEPHY_REG_1TARRAY[];
34#define RTL8723BEPHY_REG_ARRAY_PGLEN 36
35extern u32 RTL8723BEPHY_REG_ARRAY_PG[];
36#define RTL8723BE_RADIOA_1TARRAYLEN 206
37extern u32 RTL8723BE_RADIOA_1TARRAY[];
38#define RTL8723BEMAC_1T_ARRAYLEN 194
39extern u32 RTL8723BEMAC_1T_ARRAY[];
40#define RTL8723BEAGCTAB_1TARRAYLEN 260
41extern u32 RTL8723BEAGCTAB_1TARRAY[];
42
43#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
new file mode 100644
index 000000000000..e0a0d8c8fed5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -0,0 +1,960 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../pci.h"
28#include "../base.h"
29#include "../stats.h"
30#include "reg.h"
31#include "def.h"
32#include "phy.h"
33#include "trx.h"
34#include "led.h"
35#include "dm.h"
36#include "phy.h"
37
38static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
39{
40 __le16 fc = rtl_get_fc(skb);
41
42 if (unlikely(ieee80211_is_beacon(fc)))
43 return QSLT_BEACON;
44 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
45 return QSLT_MGNT;
46
47 return skb->priority;
48}
49
50/* mac80211's rate_idx is like this:
51 *
52 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
53 *
54 * B/G rate:
55 * (rx_status->flag & RX_FLAG_HT) = 0,
56 * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
57 *
58 * N rate:
59 * (rx_status->flag & RX_FLAG_HT) = 1,
60 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
61 *
62 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
63 * A rate:
64 * (rx_status->flag & RX_FLAG_HT) = 0,
65 * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
66 *
67 * N rate:
68 * (rx_status->flag & RX_FLAG_HT) = 1,
69 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
70 */
71static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
72 bool isht, u8 desc_rate)
73{
74 int rate_idx;
75
76 if (!isht) {
77 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
78 switch (desc_rate) {
79 case DESC92C_RATE1M:
80 rate_idx = 0;
81 break;
82 case DESC92C_RATE2M:
83 rate_idx = 1;
84 break;
85 case DESC92C_RATE5_5M:
86 rate_idx = 2;
87 break;
88 case DESC92C_RATE11M:
89 rate_idx = 3;
90 break;
91 case DESC92C_RATE6M:
92 rate_idx = 4;
93 break;
94 case DESC92C_RATE9M:
95 rate_idx = 5;
96 break;
97 case DESC92C_RATE12M:
98 rate_idx = 6;
99 break;
100 case DESC92C_RATE18M:
101 rate_idx = 7;
102 break;
103 case DESC92C_RATE24M:
104 rate_idx = 8;
105 break;
106 case DESC92C_RATE36M:
107 rate_idx = 9;
108 break;
109 case DESC92C_RATE48M:
110 rate_idx = 10;
111 break;
112 case DESC92C_RATE54M:
113 rate_idx = 11;
114 break;
115 default:
116 rate_idx = 0;
117 break;
118 }
119 } else {
120 switch (desc_rate) {
121 case DESC92C_RATE6M:
122 rate_idx = 0;
123 break;
124 case DESC92C_RATE9M:
125 rate_idx = 1;
126 break;
127 case DESC92C_RATE12M:
128 rate_idx = 2;
129 break;
130 case DESC92C_RATE18M:
131 rate_idx = 3;
132 break;
133 case DESC92C_RATE24M:
134 rate_idx = 4;
135 break;
136 case DESC92C_RATE36M:
137 rate_idx = 5;
138 break;
139 case DESC92C_RATE48M:
140 rate_idx = 6;
141 break;
142 case DESC92C_RATE54M:
143 rate_idx = 7;
144 break;
145 default:
146 rate_idx = 0;
147 break;
148 }
149 }
150 } else {
151 switch (desc_rate) {
152 case DESC92C_RATEMCS0:
153 rate_idx = 0;
154 break;
155 case DESC92C_RATEMCS1:
156 rate_idx = 1;
157 break;
158 case DESC92C_RATEMCS2:
159 rate_idx = 2;
160 break;
161 case DESC92C_RATEMCS3:
162 rate_idx = 3;
163 break;
164 case DESC92C_RATEMCS4:
165 rate_idx = 4;
166 break;
167 case DESC92C_RATEMCS5:
168 rate_idx = 5;
169 break;
170 case DESC92C_RATEMCS6:
171 rate_idx = 6;
172 break;
173 case DESC92C_RATEMCS7:
174 rate_idx = 7;
175 break;
176 case DESC92C_RATEMCS8:
177 rate_idx = 8;
178 break;
179 case DESC92C_RATEMCS9:
180 rate_idx = 9;
181 break;
182 case DESC92C_RATEMCS10:
183 rate_idx = 10;
184 break;
185 case DESC92C_RATEMCS11:
186 rate_idx = 11;
187 break;
188 case DESC92C_RATEMCS12:
189 rate_idx = 12;
190 break;
191 case DESC92C_RATEMCS13:
192 rate_idx = 13;
193 break;
194 case DESC92C_RATEMCS14:
195 rate_idx = 14;
196 break;
197 case DESC92C_RATEMCS15:
198 rate_idx = 15;
199 break;
200 default:
201 rate_idx = 0;
202 break;
203 }
204 }
205 return rate_idx;
206}
207
/* Parse the PHY status report the hardware appends to a received frame
 * and fill the signal strength/quality fields of @pstatus.  CCK frames
 * and OFDM/HT frames use different report layouts, hence the two
 * top-level branches.
 *
 * NOTE(review): "lan_idx" is presumably the LNA (low-noise amplifier)
 * gain index from the CCK AGC report -- the name looks like a typo for
 * "lna_idx"; confirm against the vendor reference driver.
 */
static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
					 struct rtl_stats *pstatus, u8 *pdesc,
					 struct rx_fwinfo_8723be *p_drvinfo,
					 bool packet_match_bssid,
					 bool packet_toself,
					 bool packet_beacon)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
	struct phy_sts_cck_8723e_t *cck_buf;
	struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	char rx_pwr_all = 0, rx_pwr[4];
	u8 rf_rx_num = 0, evm, pwdb_all;
	u8 i, max_spatial_stream;
	u32 rssi, total_rssi = 0;
	bool is_cck = pstatus->is_cck;
	u8 lan_idx, vga_idx;

	/* Record it for next packet processing */
	pstatus->packet_matchbssid = packet_match_bssid;
	pstatus->packet_toself = packet_toself;
	pstatus->packet_beacon = packet_beacon;
	/* -1 marks "no per-stream signal quality available". */
	pstatus->rx_mimo_sig_qual[0] = -1;
	pstatus->rx_mimo_sig_qual[1] = -1;

	if (is_cck) {
		u8 cck_highpwr;
		u8 cck_agc_rpt;
		/* CCK Driver info Structure is not the same as OFDM packet. */
		cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
		cck_agc_rpt = cck_buf->cck_agc_rpt;

		/* (1)Hardware does not provide RSSI for CCK
		 * (2)PWDB, Average PWDB cacluated by
		 * hardware (for rate adaptive)
		 */
		if (ppsc->rfpwr_state == ERFON)
			cck_highpwr = (u8) rtl_get_bbreg(hw,
						 RFPGA0_XA_HSSIPARAMETER2,
						 BIT(9));
		else
			cck_highpwr = false;

		/* AGC report: bits [7:5] = amplifier gain index,
		 * bits [4:0] = VGA index.  Each (lan_idx, vga_idx) pair
		 * maps to a dBm estimate via the vendor-supplied table
		 * below.
		 */
		lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
		vga_idx = (cck_agc_rpt & 0x1f);
		switch (lan_idx) {
		case 7:
			if (vga_idx <= 27)/*VGA_idx = 27~2*/
				rx_pwr_all = -100 + 2 * (27 - vga_idx);
			else
				rx_pwr_all = -100;
			break;
		case 6:/*VGA_idx = 2~0*/
			rx_pwr_all = -48 + 2 * (2 - vga_idx);
			break;
		case 5:/*VGA_idx = 7~5*/
			rx_pwr_all = -42 + 2 * (7 - vga_idx);
			break;
		case 4:/*VGA_idx = 7~4*/
			rx_pwr_all = -36 + 2 * (7 - vga_idx);
			break;
		case 3:/*VGA_idx = 7~0*/
			rx_pwr_all = -24 + 2 * (7 - vga_idx);
			break;
		case 2:
			if (cck_highpwr)/*VGA_idx = 5~0*/
				rx_pwr_all = -12 + 2 * (5 - vga_idx);
			else
				rx_pwr_all = -6 + 2 * (5 - vga_idx);
			break;
		case 1:
			rx_pwr_all = 8 - 2 * vga_idx;
			break;
		case 0:
			rx_pwr_all = 14 - 2 * vga_idx;
			break;
		default:
			break;
		}
		rx_pwr_all += 6;
		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
		/* CCK gain is smaller than OFDM/MCS gain, */
		/* so we add gain diff by experiences,
		 * the val is 6
		 */
		pwdb_all += 6;
		if (pwdb_all > 100)
			pwdb_all = 100;
		/* modify the offset to make the same gain index with OFDM. */
		if (pwdb_all > 34 && pwdb_all <= 42)
			pwdb_all -= 2;
		else if (pwdb_all > 26 && pwdb_all <= 34)
			pwdb_all -= 6;
		else if (pwdb_all > 14 && pwdb_all <= 26)
			pwdb_all -= 8;
		else if (pwdb_all > 4 && pwdb_all <= 14)
			pwdb_all -= 4;
		/* Without high-power RX, stretch the upper range and lift
		 * the middle range so low-gain readings stay comparable.
		 */
		if (!cck_highpwr) {
			if (pwdb_all >= 80)
				pwdb_all = ((pwdb_all - 80) << 1) +
					   ((pwdb_all - 80) >> 1) + 80;
			else if ((pwdb_all <= 78) && (pwdb_all >= 20))
				pwdb_all += 3;
			if (pwdb_all > 100)
				pwdb_all = 100;
		}

		pstatus->rx_pwdb_all = pwdb_all;
		pstatus->recvsignalpower = rx_pwr_all;

		/* (3) Get Signal Quality (EVM) */
		if (packet_match_bssid) {
			u8 sq;

			/* Strong signal: report perfect quality; otherwise
			 * rescale the raw SQ report (64..20 -> 0..100).
			 */
			if (pstatus->rx_pwdb_all > 40) {
				sq = 100;
			} else {
				sq = cck_buf->sq_rpt;
				if (sq > 64)
					sq = 0;
				else if (sq < 20)
					sq = 100;
				else
					sq = ((64 - sq) * 100) / 44;
			}

			pstatus->signalquality = sq;
			pstatus->rx_mimo_sig_qual[0] = sq;
			pstatus->rx_mimo_sig_qual[1] = -1;
		}
	} else {
		rtlpriv->dm.rfpath_rxenable[0] = true;
		rtlpriv->dm.rfpath_rxenable[1] = true;

		/* (1)Get RSSI for HT rate */
		for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
			/* we will judge RF RX path now. */
			if (rtlpriv->dm.rfpath_rxenable[i])
				rf_rx_num++;

			/* Per-path power: 6-bit gain report, 2 dB/step,
			 * offset -110 dBm.
			 */
			rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110;

			/* Translate DBM to percentage. */
			rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
			total_rssi += rssi;

			/* Get Rx snr value in DB */
			rtlpriv->stats.rx_snr_db[i] =
					 (long)(p_drvinfo->rxsnr[i] / 2);

			/* Record Signal Strength for next packet */
			if (packet_match_bssid)
				pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
		}

		/* (2)PWDB, Avg cacluated by hardware (for rate adaptive) */
		rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;

		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
		pstatus->rx_pwdb_all = pwdb_all;
		pstatus->rxpower = rx_pwr_all;
		pstatus->recvsignalpower = rx_pwr_all;

		/* (3)EVM of HT rate */
		/* MCS8..MCS15 imply two spatial streams; otherwise one. */
		if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 &&
		    pstatus->rate <= DESC92C_RATEMCS15)
			max_spatial_stream = 2;
		else
			max_spatial_stream = 1;

		for (i = 0; i < max_spatial_stream; i++) {
			evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);

			if (packet_match_bssid) {
				/* Fill value in RFD, Get the first
				 * spatial stream only
				 */
				if (i == 0)
					pstatus->signalquality =
							(u8) (evm & 0xff);
				pstatus->rx_mimo_sig_qual[i] =
							(u8) (evm & 0xff);
			}
		}
		/* Accumulate carrier-frequency-offset tails for the DM
		 * CFO-tracking code; the packet counter wraps before
		 * overflowing.
		 */
		if (packet_match_bssid) {
			for (i = RF90_PATH_A; i <= RF90_PATH_B; i++)
				rtl_priv(hw)->dm.cfo_tail[i] =
					(char)p_phystrpt->path_cfotail[i];

			rtl_priv(hw)->dm.packet_count++;
			if (rtl_priv(hw)->dm.packet_count == 0xffffffff)
				rtl_priv(hw)->dm.packet_count = 0;
		}
	}

	/* UI BSS List signal strength(in percentage),
	 * make it good looking, from 0~100.
	 */
	if (is_cck)
		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
								pwdb_all));
	else if (rf_rx_num != 0)
		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
						total_rssi /= rf_rx_num));
	/*HW antenna diversity*/
	rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->ant_sel;
	rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->ant_sel_b;
	rtldm->fat_table.antsel_rx_keep_2 = p_phystrpt->antsel_rx_keep_2;
}
418
419static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw,
420 struct sk_buff *skb,
421 struct rtl_stats *pstatus,
422 u8 *pdesc,
423 struct rx_fwinfo_8723be *p_drvinfo)
424{
425 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
426 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
427 struct ieee80211_hdr *hdr;
428 u8 *tmp_buf;
429 u8 *praddr;
430 u8 *psaddr;
431 u16 fc, type;
432 bool packet_matchbssid, packet_toself, packet_beacon;
433
434 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
435
436 hdr = (struct ieee80211_hdr *)tmp_buf;
437 fc = le16_to_cpu(hdr->frame_control);
438 type = WLAN_FC_GET_TYPE(hdr->frame_control);
439 praddr = hdr->addr1;
440 psaddr = ieee80211_get_SA(hdr);
441 memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
442
443 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
444 (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
445 hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
446 hdr->addr2 : hdr->addr3)) &&
447 (!pstatus->hwerror) &&
448 (!pstatus->crc) && (!pstatus->icv));
449
450 packet_toself = packet_matchbssid &&
451 (!ether_addr_equal(praddr, rtlefuse->dev_addr));
452
453 /* YP: packet_beacon is not initialized,
454 * this assignment is neccesary,
455 * otherwise it counld be true in this case
456 * the situation is much worse in Kernel 3.10
457 */
458 if (ieee80211_is_beacon(hdr->frame_control))
459 packet_beacon = true;
460 else
461 packet_beacon = false;
462
463 if (packet_beacon && packet_matchbssid)
464 rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
465
466 _rtl8723be_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
467 packet_matchbssid,
468 packet_toself,
469 packet_beacon);
470
471 rtl_process_phyinfo(hw, tmp_buf, pstatus);
472}
473
474static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
475 u8 *virtualaddress)
476{
477 u32 dwtmp = 0;
478 memset(virtualaddress, 0, 8);
479
480 SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
481 if (ptcb_desc->empkt_num == 1) {
482 dwtmp = ptcb_desc->empkt_len[0];
483 } else {
484 dwtmp = ptcb_desc->empkt_len[0];
485 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
486 dwtmp += ptcb_desc->empkt_len[1];
487 }
488 SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
489
490 if (ptcb_desc->empkt_num <= 3) {
491 dwtmp = ptcb_desc->empkt_len[2];
492 } else {
493 dwtmp = ptcb_desc->empkt_len[2];
494 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
495 dwtmp += ptcb_desc->empkt_len[3];
496 }
497 SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
498 if (ptcb_desc->empkt_num <= 5) {
499 dwtmp = ptcb_desc->empkt_len[4];
500 } else {
501 dwtmp = ptcb_desc->empkt_len[4];
502 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
503 dwtmp += ptcb_desc->empkt_len[5];
504 }
505 SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
506 SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
507 if (ptcb_desc->empkt_num <= 7) {
508 dwtmp = ptcb_desc->empkt_len[6];
509 } else {
510 dwtmp = ptcb_desc->empkt_len[6];
511 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
512 dwtmp += ptcb_desc->empkt_len[7];
513 }
514 SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
515 if (ptcb_desc->empkt_num <= 9) {
516 dwtmp = ptcb_desc->empkt_len[8];
517 } else {
518 dwtmp = ptcb_desc->empkt_len[8];
519 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
520 dwtmp += ptcb_desc->empkt_len[9];
521 }
522 SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
523}
524
525bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
526 struct rtl_stats *status,
527 struct ieee80211_rx_status *rx_status,
528 u8 *pdesc, struct sk_buff *skb)
529{
530 struct rtl_priv *rtlpriv = rtl_priv(hw);
531 struct rx_fwinfo_8723be *p_drvinfo;
532 struct ieee80211_hdr *hdr;
533
534 u32 phystatus = GET_RX_DESC_PHYST(pdesc);
535 status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
536 if (status->packet_report_type == TX_REPORT2)
537 status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc);
538 else
539 status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
540 status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
541 RX_DRV_INFO_SIZE_UNIT;
542 status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
543 status->icv = (u16) GET_RX_DESC_ICV(pdesc);
544 status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
545 status->hwerror = (status->crc | status->icv);
546 status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
547 status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
548 status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
549 status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
550 status->isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
551 if (status->packet_report_type == NORMAL_RX)
552 status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
553 status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
554 status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
555
556 status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
557
558 status->macid = GET_RX_DESC_MACID(pdesc);
559 if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
560 status->wake_match = BIT(2);
561 else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
562 status->wake_match = BIT(1);
563 else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
564 status->wake_match = BIT(0);
565 else
566 status->wake_match = 0;
567 if (status->wake_match)
568 RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
569 "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
570 status->wake_match);
571 rx_status->freq = hw->conf.chandef.chan->center_freq;
572 rx_status->band = hw->conf.chandef.chan->band;
573
574
575 hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
576 status->rx_bufshift);
577
578 if (status->crc)
579 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
580
581 if (status->rx_is40Mhzpacket)
582 rx_status->flag |= RX_FLAG_40MHZ;
583
584 if (status->is_ht)
585 rx_status->flag |= RX_FLAG_HT;
586
587 rx_status->flag |= RX_FLAG_MACTIME_START;
588
589 /* hw will set status->decrypted true, if it finds the
590 * frame is open data frame or mgmt frame.
591 * So hw will not decryption robust managment frame
592 * for IEEE80211w but still set status->decrypted
593 * true, so here we should set it back to undecrypted
594 * for IEEE80211w frame, and mac80211 sw will help
595 * to decrypt it
596 */
597 if (status->decrypted) {
598 if (!hdr) {
599 WARN_ON_ONCE(true);
600 pr_err("decrypted is true but hdr NULL in skb %p\n",
601 rtl_get_hdr(skb));
602 return false;
603 }
604
605 if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
606 (ieee80211_has_protected(hdr->frame_control)))
607 rx_status->flag &= ~RX_FLAG_DECRYPTED;
608 else
609 rx_status->flag |= RX_FLAG_DECRYPTED;
610 }
611
612 /* rate_idx: index of data rate into band's
613 * supported rates or MCS index if HT rates
614 * are use (RX_FLAG_HT)
615 * Notice: this is diff with windows define
616 */
617 rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
618 status->rate);
619
620 rx_status->mactime = status->timestamp_low;
621 if (phystatus) {
622 p_drvinfo = (struct rx_fwinfo_8723be *)(skb->data +
623 status->rx_bufshift);
624
625 _rtl8723be_translate_rx_signal_stuff(hw, skb, status,
626 pdesc, p_drvinfo);
627 }
628
629 /*rx_status->qual = status->signal; */
630 rx_status->signal = status->recvsignalpower + 10;
631 if (status->packet_report_type == TX_REPORT2) {
632 status->macid_valid_entry[0] =
633 GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
634 status->macid_valid_entry[1] =
635 GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
636 }
637 return true;
638}
639
/* Fill a hardware TX descriptor for an outgoing frame.
 *
 * @pdesc_tx:    the descriptor to fill (cleared before use)
 * @pbd_desc_tx: unused by this chip variant
 * @ptcb_desc:   rate-control decisions filled in by rtl_get_tcb_desc()
 *
 * Statement order matters here: the skb may be prepended with an
 * 8-byte early-mode header before DMA mapping, and most descriptor
 * fields are written only for the first segment of a fragmented frame.
 */
void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
			    struct ieee80211_hdr *hdr, u8 *pdesc_tx,
			    u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
			    struct ieee80211_sta *sta, struct sk_buff *skb,
			    u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	u8 *pdesc = pdesc_tx;
	u16 seq_number;
	__le16 fc = hdr->frame_control;
	unsigned int buf_len = 0;
	/* Frame length captured BEFORE any early-mode header is pushed;
	 * used for the PKT_SIZE field below.
	 */
	unsigned int skb_len = skb->len;
	u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue);
	bool firstseg = ((hdr->seq_ctrl &
			  cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
	bool lastseg = ((hdr->frame_control &
			 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
	dma_addr_t mapping;
	u8 bw_40 = 0;
	u8 short_gi = 0;

	/* Determine 40 MHz use: follow our association state as a STA,
	 * or the peer's HT capability when we are AP/IBSS.
	 */
	if (mac->opmode == NL80211_IFTYPE_STATION) {
		bw_40 = mac->bw_40;
	} else if (mac->opmode == NL80211_IFTYPE_AP ||
		   mac->opmode == NL80211_IFTYPE_ADHOC) {
		if (sta)
			bw_40 = sta->ht_cap.cap &
				IEEE80211_HT_CAP_SUP_WIDTH_20_40;
	}
	seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
	/* reserve 8 byte for AMPDU early mode */
	if (rtlhal->earlymode_enable) {
		skb_push(skb, EM_HDR_LEN);
		memset(skb->data, 0, EM_HDR_LEN);
	}
	/* DMA buffer length includes the early-mode header (if pushed). */
	buf_len = skb->len;
	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error");
		return;
	}
	CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be));
	/* Null-function and control frames are never fragmented. */
	if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
		firstseg = true;
		lastseg = true;
	}
	if (firstseg) {
		if (rtlhal->earlymode_enable) {
			SET_TX_DESC_PKT_OFFSET(pdesc, 1);
			SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
					   EM_HDR_LEN);
			if (ptcb_desc->empkt_num) {
				RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
					 "Insert 8 byte.pTcb->EMPktNum:%d\n",
					 ptcb_desc->empkt_num);
				_rtl8723be_insert_emcontent(ptcb_desc,
							    (u8 *)(skb->data));
			}
		} else {
			SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
		}

		/* ptcb_desc->use_driver_rate = true; */
		SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
		/* Short GI applies to HT rates; short preamble to legacy. */
		if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
			short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
		else
			short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;

		SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);

		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
			SET_TX_DESC_AGG_ENABLE(pdesc, 1);
			SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
		}
		SET_TX_DESC_SEQ(pdesc, seq_number);
		/* RTS and CTS-to-self are mutually exclusive. */
		SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
						!ptcb_desc->cts_enable) ?
					       1 : 0));
		SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
		SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ?
					     1 : 0));

		SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);

		SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
		SET_TX_DESC_RTS_SHORT(pdesc,
			((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
			 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
			 (ptcb_desc->rts_use_shortgi ? 1 : 0)));

		if (ptcb_desc->btx_enable_sw_calc_duration)
			SET_TX_DESC_NAV_USE_HDR(pdesc, 1);

		/* Bandwidth/sub-carrier selection for 40 MHz capable links. */
		if (bw_40) {
			if (ptcb_desc->packet_bw) {
				SET_TX_DESC_DATA_BW(pdesc, 1);
				SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
			} else {
				SET_TX_DESC_DATA_BW(pdesc, 0);
				SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
			}
		} else {
			SET_TX_DESC_DATA_BW(pdesc, 0);
			SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
		}

		SET_TX_DESC_LINIP(pdesc, 0);
		/* PKT_SIZE is the frame length without the EM header. */
		SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
		if (sta) {
			u8 ampdu_density = sta->ht_cap.ampdu_density;
			SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
		}
		/* Hardware encryption: map the cipher suite to the chip's
		 * security-type code (0x1 = WEP/TKIP, 0x3 = AES-CCMP).
		 */
		if (info->control.hw_key) {
			struct ieee80211_key_conf *keyconf =
						 info->control.hw_key;
			switch (keyconf->cipher) {
			case WLAN_CIPHER_SUITE_WEP40:
			case WLAN_CIPHER_SUITE_WEP104:
			case WLAN_CIPHER_SUITE_TKIP:
				SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
				break;
			case WLAN_CIPHER_SUITE_CCMP:
				SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
				break;
			default:
				SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
				break;
			}
		}

		SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
		SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
		SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
		SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
				       1 : 0);
		SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);

		/* Reverse-direction grant for QoS data when enabled. */
		if (ieee80211_is_data_qos(fc)) {
			if (mac->rdg_en) {
				RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
					 "Enable RDG function.\n");
				SET_TX_DESC_RDG_ENABLE(pdesc, 1);
				SET_TX_DESC_HTC(pdesc, 1);
			}
		}
	}

	SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
	SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
	SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
	SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);

	/* Non-QoS frames use the hardware sequence counter. */
	if (!ieee80211_is_data_qos(fc)) {
		SET_TX_DESC_HWSEQ_EN(pdesc, 1);
		SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
	}
	SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
	    is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
		SET_TX_DESC_BMC(pdesc, 1);
	}
	RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
811
812void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
813 bool b_firstseg, bool b_lastseg,
814 struct sk_buff *skb)
815{
816 struct rtl_priv *rtlpriv = rtl_priv(hw);
817 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
818 u8 fw_queue = QSLT_BEACON;
819
820 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
821 skb->data, skb->len,
822 PCI_DMA_TODEVICE);
823
824 if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
825 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
826 "DMA mapping error");
827 return;
828 }
829 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
830
831 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
832
833 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
834
835 SET_TX_DESC_SEQ(pdesc, 0);
836
837 SET_TX_DESC_LINIP(pdesc, 0);
838
839 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
840
841 SET_TX_DESC_FIRST_SEG(pdesc, 1);
842 SET_TX_DESC_LAST_SEG(pdesc, 1);
843
844 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
845
846 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
847
848 SET_TX_DESC_RATE_ID(pdesc, 0);
849 SET_TX_DESC_MACID(pdesc, 0);
850
851 SET_TX_DESC_OWN(pdesc, 1);
852
853 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
854
855 SET_TX_DESC_FIRST_SEG(pdesc, 1);
856 SET_TX_DESC_LAST_SEG(pdesc, 1);
857
858 SET_TX_DESC_USE_RATE(pdesc, 1);
859}
860
861void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
862 u8 desc_name, u8 *val)
863{
864 if (istx) {
865 switch (desc_name) {
866 case HW_DESC_OWN:
867 SET_TX_DESC_OWN(pdesc, 1);
868 break;
869 case HW_DESC_TX_NEXTDESC_ADDR:
870 SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
871 break;
872 default:
873 RT_ASSERT(false, "ERR txdesc :%d not process\n",
874 desc_name);
875 break;
876 }
877 } else {
878 switch (desc_name) {
879 case HW_DESC_RXOWN:
880 SET_RX_DESC_OWN(pdesc, 1);
881 break;
882 case HW_DESC_RXBUFF_ADDR:
883 SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
884 break;
885 case HW_DESC_RXPKT_LEN:
886 SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
887 break;
888 case HW_DESC_RXERO:
889 SET_RX_DESC_EOR(pdesc, 1);
890 break;
891 default:
892 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
893 desc_name);
894 break;
895 }
896 }
897}
898
899u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
900{
901 u32 ret = 0;
902
903 if (istx) {
904 switch (desc_name) {
905 case HW_DESC_OWN:
906 ret = GET_TX_DESC_OWN(pdesc);
907 break;
908 case HW_DESC_TXBUFF_ADDR:
909 ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
910 break;
911 default:
912 RT_ASSERT(false, "ERR txdesc :%d not process\n",
913 desc_name);
914 break;
915 }
916 } else {
917 switch (desc_name) {
918 case HW_DESC_OWN:
919 ret = GET_RX_DESC_OWN(pdesc);
920 break;
921 case HW_DESC_RXPKT_LEN:
922 ret = GET_RX_DESC_PKT_LEN(pdesc);
923 break;
924 default:
925 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
926 desc_name);
927 break;
928 }
929 }
930 return ret;
931}
932
933bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
934 u8 hw_queue, u16 index)
935{
936 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
937 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
938 u8 *entry = (u8 *)(&ring->desc[ring->idx]);
939 u8 own = (u8) rtl8723be_get_desc(entry, true, HW_DESC_OWN);
940
941 /*beacon packet will only use the first
942 *descriptor by default, and the own may not
943 *be cleared by the hardware
944 */
945 if (own)
946 return false;
947 else
948 return true;
949}
950
951void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
952{
953 struct rtl_priv *rtlpriv = rtl_priv(hw);
954 if (hw_queue == BEACON_QUEUE) {
955 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
956 } else {
957 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
958 BIT(0) << (hw_queue));
959 }
960}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
new file mode 100644
index 000000000000..102f33dcc988
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
@@ -0,0 +1,617 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __RTL8723BE_TRX_H__
27#define __RTL8723BE_TRX_H__
28
29#define TX_DESC_SIZE 40
30#define TX_DESC_AGGR_SUBFRAME_SIZE 32
31
32#define RX_DESC_SIZE 32
33#define RX_DRV_INFO_SIZE_UNIT 8
34
35#define TX_DESC_NEXT_DESC_OFFSET 40
36#define USB_HWDESC_HEADER_LEN 40
37#define CRCLENGTH 4
38
39#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
40 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
41#define SET_TX_DESC_OFFSET(__pdesc, __val) \
42 SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
43#define SET_TX_DESC_BMC(__pdesc, __val) \
44 SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
45#define SET_TX_DESC_HTC(__pdesc, __val) \
46 SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
47#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
48 SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
49#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
50 SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
51#define SET_TX_DESC_LINIP(__pdesc, __val) \
52 SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
53#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
54 SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
55#define SET_TX_DESC_GF(__pdesc, __val) \
56 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
57#define SET_TX_DESC_OWN(__pdesc, __val) \
58 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
59
60#define GET_TX_DESC_PKT_SIZE(__pdesc) \
61 LE_BITS_TO_4BYTE(__pdesc, 0, 16)
62#define GET_TX_DESC_OFFSET(__pdesc) \
63 LE_BITS_TO_4BYTE(__pdesc, 16, 8)
64#define GET_TX_DESC_BMC(__pdesc) \
65 LE_BITS_TO_4BYTE(__pdesc, 24, 1)
66#define GET_TX_DESC_HTC(__pdesc) \
67 LE_BITS_TO_4BYTE(__pdesc, 25, 1)
68#define GET_TX_DESC_LAST_SEG(__pdesc) \
69 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
70#define GET_TX_DESC_FIRST_SEG(__pdesc) \
71 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
72#define GET_TX_DESC_LINIP(__pdesc) \
73 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
74#define GET_TX_DESC_NO_ACM(__pdesc) \
75 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
76#define GET_TX_DESC_GF(__pdesc) \
77 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
78#define GET_TX_DESC_OWN(__pdesc) \
79 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
80
81#define SET_TX_DESC_MACID(__pdesc, __val) \
82 SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
83#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
84 SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
85#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
86 SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
87#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
88 SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
89#define SET_TX_DESC_PIFS(__pdesc, __val) \
90 SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
91#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
92 SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
93#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
94 SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
95#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
96 SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
97#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
98 SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
99
100
101#define SET_TX_DESC_PAID(__pdesc, __val) \
102 SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
103#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \
104 SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
105#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
106 SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
107#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
108 SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
109#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
110 SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
111#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
112 SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
113#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
114 SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
115#define SET_TX_DESC_RAW(__pdesc, __val) \
116 SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
117#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
118 SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
119#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
120 SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
121#define SET_TX_DESC_BT_INT(__pdesc, __val) \
122 SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
123#define SET_TX_DESC_GID(__pdesc, __val) \
124 SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
125
126
127#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \
128 SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
129#define SET_TX_DESC_CHK_EN(__pdesc, __val) \
130 SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
131#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \
132 SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
133#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \
134 SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
135#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
136 SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
137#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
138 SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
139#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
140 SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
141#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
142 SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
143#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
144 SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
145#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
146 SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
147#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
148 SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
149#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
150 SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
151#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
152 SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
153#define SET_TX_DESC_NDPA(__pdesc, __val) \
154 SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
155#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \
156 SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
157
158
159#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
160 SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
161#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
162 SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
163#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
164 SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
165#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
166 SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
167#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
168 SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
169#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
170 SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
171
172
173#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
174 SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
175#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
176 SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val)
177#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
178 SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
179#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \
180 SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
181#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \
182 SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
183#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \
184 SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
185#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
186 SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
187#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
188 SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
189
190
191#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
192 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
193
194#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
195 LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
196
197#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
198 SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
199
200#define SET_TX_DESC_SEQ(__pdesc, __val) \
201 SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
202
203#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
204 SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
205
206#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
207 LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
208
209
210#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
211 SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
212
213#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
214 LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
215
216#define GET_RX_DESC_PKT_LEN(__pdesc) \
217 LE_BITS_TO_4BYTE(__pdesc, 0, 14)
218#define GET_RX_DESC_CRC32(__pdesc) \
219 LE_BITS_TO_4BYTE(__pdesc, 14, 1)
220#define GET_RX_DESC_ICV(__pdesc) \
221 LE_BITS_TO_4BYTE(__pdesc, 15, 1)
222#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
223 LE_BITS_TO_4BYTE(__pdesc, 16, 4)
224#define GET_RX_DESC_SECURITY(__pdesc) \
225 LE_BITS_TO_4BYTE(__pdesc, 20, 3)
226#define GET_RX_DESC_QOS(__pdesc) \
227 LE_BITS_TO_4BYTE(__pdesc, 23, 1)
228#define GET_RX_DESC_SHIFT(__pdesc) \
229 LE_BITS_TO_4BYTE(__pdesc, 24, 2)
230#define GET_RX_DESC_PHYST(__pdesc) \
231 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
232#define GET_RX_DESC_SWDEC(__pdesc) \
233 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
234#define GET_RX_DESC_LS(__pdesc) \
235 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
236#define GET_RX_DESC_FS(__pdesc) \
237 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
238#define GET_RX_DESC_EOR(__pdesc) \
239 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
240#define GET_RX_DESC_OWN(__pdesc) \
241 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
242
243#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
244 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
245#define SET_RX_DESC_EOR(__pdesc, __val) \
246 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
247#define SET_RX_DESC_OWN(__pdesc, __val) \
248 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
249
250#define GET_RX_DESC_MACID(__pdesc) \
251 LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
252#define GET_RX_DESC_TID(__pdesc) \
253 LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
254#define GET_RX_DESC_AMSDU(__pdesc) \
255 LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
256#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \
257 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
258#define GET_RX_DESC_PAGGR(__pdesc) \
259 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
260#define GET_RX_DESC_A1_FIT(__pdesc) \
261 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
262#define GET_RX_DESC_CHKERR(__pdesc) \
263 LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
264#define GET_RX_DESC_IPVER(__pdesc) \
265 LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
266#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \
267 LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
268#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \
269 LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
270#define GET_RX_DESC_PAM(__pdesc) \
271 LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
272#define GET_RX_DESC_PWR(__pdesc) \
273 LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
274#define GET_RX_DESC_MD(__pdesc) \
275 LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
276#define GET_RX_DESC_MF(__pdesc) \
277 LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
278#define GET_RX_DESC_TYPE(__pdesc) \
279 LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
280#define GET_RX_DESC_MC(__pdesc) \
281 LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
282#define GET_RX_DESC_BC(__pdesc) \
283 LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
284
285
286#define GET_RX_DESC_SEQ(__pdesc) \
287 LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
288#define GET_RX_DESC_FRAG(__pdesc) \
289 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
290#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \
291 LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
292#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \
293 LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
294#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
295 LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
296
297
298#define GET_RX_DESC_RXMCS(__pdesc) \
299 LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
300#define GET_RX_DESC_RXHT(__pdesc) \
301 LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
302#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
303 LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
304#define GET_RX_DESC_HTC(__pdesc) \
305 LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
306#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
307 LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
308#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
309 LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
310
311#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
312 LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
313#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
314 LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
315#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
316 LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
317
318#define GET_RX_DESC_SPLCP(__pdesc) \
319 LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
320#define GET_RX_STATUS_DESC_LDPC(__pdesc) \
321 LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
322#define GET_RX_STATUS_DESC_STBC(__pdesc) \
323 LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
324#define GET_RX_DESC_BW(__pdesc) \
325 LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
326
327#define GET_RX_DESC_TSFL(__pdesc) \
328 LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
329
330#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
331 LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
332#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
333 LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
334
335#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
336 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
337#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
338 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
339
340
341/* TX report 2 format in Rx desc*/
342
343#define GET_RX_RPT2_DESC_PKT_LEN(__rxstatusdesc) \
344 LE_BITS_TO_4BYTE(__rxstatusdesc, 0, 9)
345#define GET_RX_RPT2_DESC_MACID_VALID_1(__rxstatusdesc) \
346 LE_BITS_TO_4BYTE(__rxstatusdesc+16, 0, 32)
347#define GET_RX_RPT2_DESC_MACID_VALID_2(__rxstatusdesc) \
348 LE_BITS_TO_4BYTE(__rxstatusdesc+20, 0, 32)
349
350#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
351 SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
352#define SET_EARLYMODE_LEN0(__paddr, __value) \
353 SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
354#define SET_EARLYMODE_LEN1(__paddr, __value) \
355 SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
356#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
357 SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
358#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
359 SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
360#define SET_EARLYMODE_LEN3(__paddr, __value) \
361 SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
362#define SET_EARLYMODE_LEN4(__paddr, __value) \
363 SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
364
/* Zero the software-writable portion of a TX descriptor.  At most the
 * first TX_DESC_NEXT_DESC_OFFSET bytes are cleared so the next-descriptor
 * link that follows is left intact.
 */
#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
do { \
	if (_size > TX_DESC_NEXT_DESC_OFFSET) \
		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
	else \
		memset(__pdesc, 0, _size); \
} while (0)
372
/* Per-path AGC byte reported by the PHY: 7-bit gain plus a TR-switch
 * flag.  The bit order flips with host endianness so the in-memory
 * layout matches the over-the-air/hardware byte.
 */
struct phy_rx_agc_info_t {
	#ifdef __LITTLE_ENDIAN
		u8 gain:7, trsw:1;
	#else
		u8 trsw:1, gain:7;
	#endif
};
/* PHY status report appended by the hardware to received frames.
 * Field names with the "cck_..._ofdm_..." pattern are unions-by-naming:
 * the same byte carries CCK information for CCK frames and OFDM
 * information otherwise.
 *
 * NOTE(review): path_cfotail/stream_rxevm are declared as plain 'char'
 * — presumably signed values; 'char' signedness is implementation
 * defined, so s8 would be more robust.  Confirm before changing, as the
 * layout must match the hardware.
 */
struct phy_status_rpt {
	struct phy_rx_agc_info_t path_agc[2];
	u8 ch_corr[2];
	u8 cck_sig_qual_ofdm_pwdb_all;
	u8 cck_agc_rpt_ofdm_cfosho_a;
	u8 cck_rpt_b_ofdm_cfosho_b;
	u8 rsvd_1;/* ch_corr_msb; */
	u8 noise_power_db_msb;
	char path_cfotail[2];
	u8 pcts_mask[2];
	char stream_rxevm[2];
	u8 path_rxsnr[2];
	u8 noise_power_db_lsb;
	u8 rsvd_2[3];
	u8 stream_csi[2];
	u8 stream_target_csi[2];
	u8 sig_evm;
	u8 rsvd_3;
	/* Final byte is a bitfield; order flips with host endianness so
	 * the in-memory bits line up with the hardware byte.
	 */
#ifdef __LITTLE_ENDIAN
	u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
	u8 sgi_en:1;
	u8 rxsc:2;
	u8 idle_long:1;
	u8 r_ant_train_en:1;
	u8 ant_sel_b:1;
	u8 ant_sel:1;
#else /* _BIG_ENDIAN_ */
	u8 ant_sel:1;
	u8 ant_sel_b:1;
	u8 r_ant_train_en:1;
	u8 idle_long:1;
	u8 rxsc:2;
	u8 sgi_en:1;
	u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
#endif
} __packed;
416
/* Driver-info area the firmware places after the RX descriptor: raw
 * signal-quality measurements (gain, PWDB, CFO, EVM, SNR, CSI) used by
 * the RX status/signal-strength code.  __packed: layout must match what
 * the firmware writes, byte for byte.
 *
 * NOTE(review): rxevm/rxsnr are plain 'char' — presumably signed dB
 * values; confirm before relying on signedness (it is implementation
 * defined for 'char').
 */
struct rx_fwinfo_8723be {
	u8 gain_trsw[4];
	u8 pwdb_all;
	u8 cfosho[4];
	u8 cfotail[4];
	char rxevm[2];
	char rxsnr[4];
	u8 pdsnr[2];
	u8 csi_current[2];
	u8 csi_target[2];
	u8 sigevm;
	u8 max_ex_pwr;
	u8 ex_intf_flag:1;
	u8 sgi_en:1;
	u8 rxsc:2;
	u8 reserve:4;
} __packed;
434
/* Software view of the RTL8723BE TX descriptor as 32-bit bitfield
 * groups (blank lines separate the dword boundaries).  The accessor
 * macros above (SET_TX_DESC_* / GET_TX_DESC_*) are the authoritative
 * definition of the hardware layout; this struct mirrors it and must
 * not be reordered.  __packed keeps the compiler from inserting
 * padding between the dwords.
 */
struct tx_desc_8723be {
	u32 pktsize:16;
	u32 offset:8;
	u32 bmc:1;
	u32 htc:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 linip:1;
	u32 noacm:1;
	u32 gf:1;
	u32 own:1;

	u32 macid:6;
	u32 rsvd0:2;
	u32 queuesel:5;
	u32 rd_nav_ext:1;
	u32 lsig_txop_en:1;
	u32 pifs:1;
	u32 rateid:4;
	u32 nav_usehdr:1;
	u32 en_descid:1;
	u32 sectype:2;
	u32 pktoffset:8;

	u32 rts_rc:6;
	u32 data_rc:6;
	u32 agg_en:1;
	u32 rdg_en:1;
	u32 bar_retryht:2;
	u32 agg_break:1;
	u32 morefrag:1;
	u32 raw:1;
	u32 ccx:1;
	u32 ampdudensity:3;
	u32 bt_int:1;
	u32 ant_sela:1;
	u32 ant_selb:1;
	u32 txant_cck:2;
	u32 txant_l:2;
	u32 txant_ht:2;

	u32 nextheadpage:8;
	u32 tailpage:8;
	u32 seq:12;
	u32 cpu_handle:1;
	u32 tag1:1;
	u32 trigger_int:1;
	u32 hwseq_en:1;

	u32 rtsrate:5;
	u32 apdcfe:1;
	u32 qos:1;
	u32 hwseq_ssn:1;
	u32 userrate:1;
	u32 dis_rtsfb:1;
	u32 dis_datafb:1;
	u32 cts2self:1;
	u32 rts_en:1;
	u32 hwrts_en:1;
	u32 portid:1;
	u32 pwr_status:3;
	u32 waitdcts:1;
	u32 cts2ap_en:1;
	u32 txsc:2;
	u32 stbc:2;
	u32 txshort:1;
	u32 txbw:1;
	u32 rtsshort:1;
	u32 rtsbw:1;
	u32 rtssc:2;
	u32 rtsstbc:2;

	u32 txrate:6;
	u32 shortgi:1;
	u32 ccxt:1;
	u32 txrate_fb_lmt:5;
	u32 rtsrate_fb_lmt:4;
	u32 retrylmt_en:1;
	u32 txretrylmt:6;
	u32 usb_txaggnum:8;

	u32 txagca:5;
	u32 txagcb:5;
	u32 usemaxlen:1;
	u32 maxaggnum:5;
	u32 mcsg1maxlen:4;
	u32 mcsg2maxlen:4;
	u32 mcsg3maxlen:4;
	u32 mcs7sgimaxlen:4;

	u32 txbuffersize:16;
	u32 sw_offset30:8;
	u32 sw_offset31:4;
	u32 rsvd1:1;
	u32 antsel_c:1;
	u32 null_0:1;
	u32 null_1:1;

	/* DMA addresses of the TX buffer and of the next descriptor
	 * (low dword plus a high dword for 64-bit addressing).
	 */
	u32 txbuffaddr;
	u32 txbufferaddr64;
	u32 nextdescaddress;
	u32 nextdescaddress64;

	u32 reserve_pass_pcie_mm_limit[4];
} __packed;
540
/* Software view of the RTL8723BE RX descriptor as 32-bit bitfield
 * groups (blank lines separate the dword boundaries).  The GET_RX_DESC_*
 * macros above are the authoritative layout; this struct mirrors it and
 * must not be reordered.  __packed prevents compiler-inserted padding.
 */
struct rx_desc_8723be {
	u32 length:14;
	u32 crc32:1;
	u32 icverror:1;
	u32 drv_infosize:4;
	u32 security:3;
	u32 qos:1;
	u32 shift:2;
	u32 phystatus:1;
	u32 swdec:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 eor:1;
	u32 own:1;

	u32 macid:6;
	u32 tid:4;
	u32 hwrsvd:5;
	u32 paggr:1;
	u32 faggr:1;
	u32 a1_fit:4;
	u32 a2_fit:4;
	u32 pam:1;
	u32 pwr:1;
	u32 moredata:1;
	u32 morefrag:1;
	u32 type:2;
	u32 mc:1;
	u32 bc:1;

	u32 seq:12;
	u32 frag:4;
	u32 nextpktlen:14;
	u32 nextind:1;
	u32 rsvd:1;

	u32 rxmcs:6;
	u32 rxht:1;
	u32 amsdu:1;
	u32 splcp:1;
	u32 bandwidth:1;
	u32 htc:1;
	u32 tcpchk_rpt:1;
	u32 ipcchk_rpt:1;
	u32 tcpchk_valid:1;
	u32 hwpcerr:1;
	u32 hwpcind:1;
	u32 iv0:16;

	u32 iv1;

	u32 tsfl;

	/* DMA address of the RX buffer (low dword plus a high dword for
	 * 64-bit addressing).
	 */
	u32 bufferaddress;
	u32 bufferaddress64;

} __packed;
598
599void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
600 struct ieee80211_hdr *hdr, u8 *pdesc,
601 u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
602 struct ieee80211_sta *sta, struct sk_buff *skb,
603 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
604bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
605 struct rtl_stats *status,
606 struct ieee80211_rx_status *rx_status,
607 u8 *pdesc, struct sk_buff *skb);
608void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
609 u8 desc_name, u8 *val);
610u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name);
611bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
612 u8 hw_queue, u16 index);
613void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
614void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
615 bool b_firstseg, bool b_lastseg,
616 struct sk_buff *skb);
617#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
new file mode 100644
index 000000000000..345a68adcf38
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
@@ -0,0 +1,9 @@
1rtl8723-common-objs := \
2 main.o \
3 dm_common.o \
4 fw_common.o \
5 phy_common.o
6
7obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
8
9ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
new file mode 100644
index 000000000000..4e254b72bf45
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
@@ -0,0 +1,65 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "dm_common.h"
28#include "../rtl8723ae/dm.h"
29#include <linux/module.h>
30
31/* These routines are common to RTL8723AE and RTL8723bE */
32
33void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
34{
35 struct rtl_priv *rtlpriv = rtl_priv(hw);
36
37 rtlpriv->dm.dynamic_txpower_enable = false;
38
39 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
40 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
41}
42EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_txpower);
43
44void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw)
45{
46 struct rtl_priv *rtlpriv = rtl_priv(hw);
47
48 rtlpriv->dm.current_turbo_edca = false;
49 rtlpriv->dm.is_any_nonbepkts = false;
50 rtlpriv->dm.is_cur_rdlstate = false;
51}
52EXPORT_SYMBOL_GPL(rtl8723_dm_init_edca_turbo);
53
54void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
55{
56 struct rtl_priv *rtlpriv = rtl_priv(hw);
57
58 rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
59 rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
60 rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
61 rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
62 rtlpriv->dm_pstable.rssi_val_min = 0;
63 rtlpriv->dm_pstable.initialize = 0;
64}
65EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_bb_powersaving);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
new file mode 100644
index 000000000000..5c1b94ce2f86
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
@@ -0,0 +1,33 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __DM_COMMON_H__
27#define __DM_COMMON_H__
28
29void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw);
30void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw);
31void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw);
32
33#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
new file mode 100644
index 000000000000..540278ff462b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -0,0 +1,329 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "../pci.h"
28#include "../base.h"
29#include "fw_common.h"
30#include <linux/module.h>
31
/* Enable or disable the MCU firmware-download path.
 *
 * enable == true:  set REG_SYS_FUNC_EN+1 bit 2, set the download-enable
 *                  bit (REG_MCUFWDL bit 0) and clear REG_MCUFWDL+2 bit 3.
 * enable == false: clear the download-enable bit and zero REG_MCUFWDL+1.
 *
 * NOTE(review): the meaning of the individual mask bits is per the
 * RTL8723 register map — only the raw read-modify-write sequence is
 * visible here, and its order matters.
 */
void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmp;

	if (enable) {
		tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);

		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);

		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
		rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
	} else {
		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);

		rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
	}
}
EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
54
55void rtl8723_fw_block_write(struct ieee80211_hw *hw,
56 const u8 *buffer, u32 size)
57{
58 struct rtl_priv *rtlpriv = rtl_priv(hw);
59 u32 blocksize = sizeof(u32);
60 u8 *bufferptr = (u8 *)buffer;
61 u32 *pu4byteptr = (u32 *)buffer;
62 u32 i, offset, blockcount, remainsize;
63
64 blockcount = size / blocksize;
65 remainsize = size % blocksize;
66
67 for (i = 0; i < blockcount; i++) {
68 offset = i * blocksize;
69 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
70 *(pu4byteptr + i));
71 }
72 if (remainsize) {
73 offset = blockcount * blocksize;
74 bufferptr += offset;
75 for (i = 0; i < remainsize; i++) {
76 rtl_write_byte(rtlpriv,
77 (FW_8192C_START_ADDRESS + offset + i),
78 *(bufferptr + i));
79 }
80 }
81}
82EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
83
84void rtl8723_fw_page_write(struct ieee80211_hw *hw,
85 u32 page, const u8 *buffer, u32 size)
86{
87 struct rtl_priv *rtlpriv = rtl_priv(hw);
88 u8 value8;
89 u8 u8page = (u8) (page & 0x07);
90
91 value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
92
93 rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
94 rtl8723_fw_block_write(hw, buffer, size);
95}
96EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
97
98static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
99{
100 u32 fwlen = *pfwlen;
101 u8 remain = (u8) (fwlen % 4);
102
103 remain = (remain == 0) ? 0 : (4 - remain);
104
105 while (remain > 0) {
106 pfwbuf[fwlen] = 0;
107 fwlen++;
108 remain--;
109 }
110 *pfwlen = fwlen;
111}
112
/* Download a firmware image to the chip, one FW_8192C_PAGE_SIZE page at
 * a time, with a final partial page for the remainder.
 *
 * NOTE(review): the image is zero-padded IN PLACE to a 4-byte multiple,
 * so @buffer must have up to 3 bytes of slack past @size.
 * NOTE(review): @version is currently unused here.
 */
void rtl8723_write_fw(struct ieee80211_hw *hw,
		      enum version_8723e version,
		      u8 *buffer, u32 size)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *bufferptr = buffer;
	u32 pagenums, remainsize;
	u32 page, offset;

	RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);

	/* Pad to a 4-byte boundary; may grow size by up to 3. */
	rtl8723_fill_dummy(bufferptr, &size);

	pagenums = size / FW_8192C_PAGE_SIZE;
	remainsize = size % FW_8192C_PAGE_SIZE;

	/* Only a warning — the download proceeds regardless. */
	if (pagenums > 8) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Page numbers should not greater then 8\n");
	}
	for (page = 0; page < pagenums; page++) {
		offset = page * FW_8192C_PAGE_SIZE;
		rtl8723_fw_page_write(hw, page, (bufferptr + offset),
				      FW_8192C_PAGE_SIZE);
	}
	if (remainsize) {
		offset = pagenums * FW_8192C_PAGE_SIZE;
		page = pagenums;
		rtl8723_fw_page_write(hw, page, (bufferptr + offset),
				      remainsize);
	}
}
EXPORT_SYMBOL_GPL(rtl8723_write_fw);
146
/* Request a firmware self-reset on the RTL8723AE and wait for it to
 * complete: write 0x20 to REG_HMETFR+3, then poll REG_SYS_FUNC_EN+1
 * bit 2 for up to 100 * 50us.  On timeout, force the bit clear.
 */
void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
{
	u8 u1tmp;
	u8 delay = 100;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
	u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);

	/* Poll until the MCU clears BIT(2) or the budget runs out. */
	while (u1tmp & BIT(2)) {
		delay--;
		if (delay == 0)
			break;
		udelay(50);
		u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
	}
	/* Timed out: clear the bit ourselves so the core is held reset. */
	if (delay == 0) {
		u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
	}
}
EXPORT_SYMBOL_GPL(rtl8723ae_firmware_selfreset);
169
/* Reset the 8051 MCU on the RTL8723BE: clear REG_RSV_CTRL+1 bit 0 and
 * REG_SYS_FUNC_EN+1 bit 2, hold for 50us, then set both back.  The
 * exact sequence and the delay between the two halves are required by
 * the hardware — do not reorder.
 */
void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw)
{
	u8 u1b_tmp;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));

	u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
	udelay(50);

	/* Release the reset in the same order it was asserted. */
	u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0)));

	u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 " _8051Reset8723be(): 8051 reset success .\n");
}
EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset);
192
/* Finish the firmware download handshake and wait for the MCU to come
 * up.  Two polling phases: first wait for the checksum report
 * (FWDL_CHKSUM_RPT), then acknowledge download completion (set
 * MCUFWDL_RDY, clear WINTINI_RDY), optionally self-reset the 8051 on
 * the 8723BE, and finally poll for WINTINI_RDY.
 *
 * Returns 0 when the firmware signals ready, -EIO on either timeout.
 */
int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = -EIO;
	u32 counter = 0;
	u32 value32;

	/* Phase 1: wait for the download checksum report. */
	do {
		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
	} while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
		 (!(value32 & FWDL_CHKSUM_RPT)));

	if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "chksum report fail ! REG_MCUFWDL:0x%08x .\n",
			 value32);
		goto exit;
	}
	RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
		 "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);

	/* Acknowledge: mark download ready, clear the init-done flag the
	 * firmware will set once it has booted.
	 */
	value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY;
	value32 &= ~WINTINI_RDY;
	rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);

	/* The 8723BE needs an 8051 self-reset at this point. */
	if (is_8723be)
		rtl8723be_firmware_selfreset(hw);
	counter = 0;

	/* Phase 2: wait for the firmware to report init done. */
	do {
		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
		if (value32 & WINTINI_RDY) {
			RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
				 "Polling FW ready success!! "
				 "REG_MCUFWDL:0x%08x .\n",
				 value32);
			err = 0;
			goto exit;
		}
		udelay(FW_8192C_POLLING_DELAY);

	} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);

	RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
		 "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
		 value32);

exit:
	return err;
}
EXPORT_SYMBOL_GPL(rtl8723_fw_free_to_go);
244
245int rtl8723_download_fw(struct ieee80211_hw *hw,
246 bool is_8723be)
247{
248 struct rtl_priv *rtlpriv = rtl_priv(hw);
249 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
250 struct rtl92c_firmware_header *pfwheader;
251 u8 *pfwdata;
252 u32 fwsize;
253 int err;
254 enum version_8723e version = rtlhal->version;
255
256 if (!rtlhal->pfirmware)
257 return 1;
258
259 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
260 pfwdata = rtlhal->pfirmware;
261 fwsize = rtlhal->fwsize;
262 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
263 "normal Firmware SIZE %d\n", fwsize);
264
265 if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) {
266 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
267 "Firmware Version(%d), Signature(%#x), Size(%d)\n",
268 pfwheader->version, pfwheader->signature,
269 (int)sizeof(struct rtl92c_firmware_header));
270
271 pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
272 fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
273 }
274 if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
275 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
276 if (is_8723be)
277 rtl8723be_firmware_selfreset(hw);
278 else
279 rtl8723ae_firmware_selfreset(hw);
280 }
281 rtl8723_enable_fw_download(hw, true);
282 rtl8723_write_fw(hw, version, pfwdata, fwsize);
283 rtl8723_enable_fw_download(hw, false);
284
285 err = rtl8723_fw_free_to_go(hw, is_8723be);
286 if (err) {
287 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
288 "Firmware is not ready to run!\n");
289 } else {
290 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
291 "Firmware is ready to run!\n");
292 }
293 return 0;
294}
295EXPORT_SYMBOL_GPL(rtl8723_download_fw);
296
297bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
298 struct sk_buff *skb)
299{
300 struct rtl_priv *rtlpriv = rtl_priv(hw);
301 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
302 struct rtl8192_tx_ring *ring;
303 struct rtl_tx_desc *pdesc;
304 struct sk_buff *pskb = NULL;
305 u8 own;
306 unsigned long flags;
307
308 ring = &rtlpci->tx_ring[BEACON_QUEUE];
309
310 pskb = __skb_dequeue(&ring->queue);
311 if (pskb)
312 kfree_skb(pskb);
313
314 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
315
316 pdesc = &ring->desc[0];
317 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
318
319 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
320
321 __skb_queue_tail(&ring->queue, skb);
322
323 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
324
325 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
326
327 return true;
328}
329EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
new file mode 100644
index 000000000000..cf1cc5804d06
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
@@ -0,0 +1,126 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __FW_COMMON_H__
27#define __FW_COMMON_H__
28
29#define REG_SYS_FUNC_EN 0x0002
30#define REG_MCUFWDL 0x0080
31#define FW_8192C_START_ADDRESS 0x1000
32#define FW_8192C_PAGE_SIZE 4096
33#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
34#define FW_8192C_POLLING_DELAY 5
35
36#define MCUFWDL_RDY BIT(1)
37#define FWDL_CHKSUM_RPT BIT(2)
38#define WINTINI_RDY BIT(6)
39
40#define REG_RSV_CTRL 0x001C
41#define REG_HMETFR 0x01CC
42
43enum version_8723e {
44 VERSION_TEST_UMC_CHIP_8723 = 0x0081,
45 VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
46 VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
47 VERSION_TEST_CHIP_1T1R_8723B = 0x0106,
48 VERSION_NORMAL_SMIC_CHIP_1T1R_8723B = 0x010E,
49 VERSION_UNKNOWN = 0xFF,
50};
51
52enum rtl8723ae_h2c_cmd {
53 H2C_AP_OFFLOAD = 0,
54 H2C_SETPWRMODE = 1,
55 H2C_JOINBSSRPT = 2,
56 H2C_RSVDPAGE = 3,
57 H2C_RSSI_REPORT = 4,
58 H2C_P2P_PS_CTW_CMD = 5,
59 H2C_P2P_PS_OFFLOAD = 6,
60 H2C_RA_MASK = 7,
61 MAX_H2CCMD
62};
63
64enum rtl8723be_cmd {
65 H2C_8723BE_RSVDPAGE = 0,
66 H2C_8723BE_JOINBSSRPT = 1,
67 H2C_8723BE_SCAN = 2,
68 H2C_8723BE_KEEP_ALIVE_CTRL = 3,
69 H2C_8723BE_DISCONNECT_DECISION = 4,
70 H2C_8723BE_INIT_OFFLOAD = 6,
71 H2C_8723BE_AP_OFFLOAD = 8,
72 H2C_8723BE_BCN_RSVDPAGE = 9,
73 H2C_8723BE_PROBERSP_RSVDPAGE = 10,
74
75 H2C_8723BE_SETPWRMODE = 0x20,
76 H2C_8723BE_PS_TUNING_PARA = 0x21,
77 H2C_8723BE_PS_TUNING_PARA2 = 0x22,
78 H2C_8723BE_PS_LPS_PARA = 0x23,
79 H2C_8723BE_P2P_PS_OFFLOAD = 0x24,
80
81 H2C_8723BE_WO_WLAN = 0x80,
82 H2C_8723BE_REMOTE_WAKE_CTRL = 0x81,
83 H2C_8723BE_AOAC_GLOBAL_INFO = 0x82,
84 H2C_8723BE_AOAC_RSVDPAGE = 0x83,
85 H2C_8723BE_RSSI_REPORT = 0x42,
86 H2C_8723BE_RA_MASK = 0x40,
87 H2C_8723BE_SELECTIVE_SUSPEND_ROF_CMD,
88 H2C_8723BE_P2P_PS_MODE,
89 H2C_8723BE_PSD_RESULT,
 90 /* CTW command for P2P is not defined yet */
91 H2C_8723BE_P2P_PS_CTW_CMD,
92 MAX_8723BE_H2CCMD
93};
94
95struct rtl92c_firmware_header {
96 u16 signature;
97 u8 category;
98 u8 function;
99 u16 version;
100 u8 subversion;
101 u8 rsvd1;
102 u8 month;
103 u8 date;
104 u8 hour;
105 u8 minute;
106 u16 ramcodesize;
107 u16 rsvd2;
108 u32 svnindex;
109 u32 rsvd3;
110 u32 rsvd4;
111 u32 rsvd5;
112};
113
114void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
115void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
116void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable);
117void rtl8723_fw_block_write(struct ieee80211_hw *hw,
118 const u8 *buffer, u32 size);
119void rtl8723_fw_page_write(struct ieee80211_hw *hw,
120 u32 page, const u8 *buffer, u32 size);
121void rtl8723_write_fw(struct ieee80211_hw *hw,
122 enum version_8723e version,
123 u8 *buffer, u32 size);
124int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be);
125int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be);
126#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
new file mode 100644
index 000000000000..9014a94fac6a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
@@ -0,0 +1,33 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include <linux/module.h>
28
29
30MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
31MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
32MODULE_LICENSE("GPL");
33MODULE_DESCRIPTION("Realtek RTL8723AE/RTL8723BE 802.11n PCI wireless common routines");
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
new file mode 100644
index 000000000000..d73b659bd2b5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
@@ -0,0 +1,434 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "../wifi.h"
27#include "phy_common.h"
28#include "../rtl8723ae/reg.h"
29#include <linux/module.h>
30
 31/* These routines are common to RTL8723AE and RTL8723BE */
32
33u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
34 u32 regaddr, u32 bitmask)
35{
36 struct rtl_priv *rtlpriv = rtl_priv(hw);
37 u32 returnvalue, originalvalue, bitshift;
38
39 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
40 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
41 originalvalue = rtl_read_dword(rtlpriv, regaddr);
42 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
43 returnvalue = (originalvalue & bitmask) >> bitshift;
44
45 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
46 "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n",
47 bitmask, regaddr, originalvalue);
48
49 return returnvalue;
50}
51EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
52
53void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
54 u32 bitmask, u32 data)
55{
56 struct rtl_priv *rtlpriv = rtl_priv(hw);
57 u32 originalvalue, bitshift;
58
59 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
60 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
61 regaddr, bitmask, data);
62
63 if (bitmask != MASKDWORD) {
64 originalvalue = rtl_read_dword(rtlpriv, regaddr);
65 bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
66 data = ((originalvalue & (~bitmask)) | (data << bitshift));
67 }
68
69 rtl_write_dword(rtlpriv, regaddr, data);
70
71 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
72 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
73 regaddr, bitmask, data);
74}
75EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
76
77u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
78{
79 u32 i;
80
81 for (i = 0; i <= 31; i++) {
82 if (((bitmask >> i) & 0x1) == 1)
83 break;
84 }
85 return i;
86}
87EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
88
89u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
90 enum radio_path rfpath, u32 offset)
91{
92 struct rtl_priv *rtlpriv = rtl_priv(hw);
93 struct rtl_phy *rtlphy = &(rtlpriv->phy);
94 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
95 u32 newoffset;
96 u32 tmplong, tmplong2;
97 u8 rfpi_enable = 0;
98 u32 retvalue;
99
100 offset &= 0xff;
101 newoffset = offset;
102 if (RT_CANNOT_IO(hw)) {
103 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
104 return 0xFFFFFFFF;
105 }
106 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
107 if (rfpath == RF90_PATH_A)
108 tmplong2 = tmplong;
109 else
110 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
111 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
112 (newoffset << 23) | BLSSIREADEDGE;
113 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
114 tmplong & (~BLSSIREADEDGE));
115 mdelay(1);
116 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
117 mdelay(2);
118 if (rfpath == RF90_PATH_A)
119 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
120 BIT(8));
121 else if (rfpath == RF90_PATH_B)
122 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
123 BIT(8));
124 if (rfpi_enable)
125 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
126 BLSSIREADBACKDATA);
127 else
128 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
129 BLSSIREADBACKDATA);
130 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
131 "RFR-%d Addr[0x%x]= 0x%x\n",
132 rfpath, pphyreg->rf_rb, retvalue);
133 return retvalue;
134}
135EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
136
137void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
138 enum radio_path rfpath,
139 u32 offset, u32 data)
140{
141 u32 data_and_addr;
142 u32 newoffset;
143 struct rtl_priv *rtlpriv = rtl_priv(hw);
144 struct rtl_phy *rtlphy = &(rtlpriv->phy);
145 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
146
147 if (RT_CANNOT_IO(hw)) {
148 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
149 return;
150 }
151 offset &= 0xff;
152 newoffset = offset;
153 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
154 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
155 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
156 "RFW-%d Addr[0x%x]= 0x%x\n", rfpath,
157 pphyreg->rf3wire_offset, data_and_addr);
158}
159EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);
160
161long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
162 enum wireless_mode wirelessmode,
163 u8 txpwridx)
164{
165 long offset;
166 long pwrout_dbm;
167
168 switch (wirelessmode) {
169 case WIRELESS_MODE_B:
170 offset = -7;
171 break;
172 case WIRELESS_MODE_G:
173 case WIRELESS_MODE_N_24G:
174 default:
175 offset = -8;
176 break;
177 }
178 pwrout_dbm = txpwridx / 2 + offset;
179 return pwrout_dbm;
180}
181EXPORT_SYMBOL_GPL(rtl8723_phy_txpwr_idx_to_dbm);
182
183void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
184{
185 struct rtl_priv *rtlpriv = rtl_priv(hw);
186 struct rtl_phy *rtlphy = &(rtlpriv->phy);
187
188 rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
189 rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
190 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
191 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
192
193 rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
194 rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
195 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
196 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
197
198 rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
199 rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
200
201 rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
202 rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
203
204 rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
205 RFPGA0_XA_LSSIPARAMETER;
206 rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
207 RFPGA0_XB_LSSIPARAMETER;
208
209 rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
210 rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
211 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
212 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
213
214 rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
215 rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
216 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
217 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
218
219 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
220 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
221
222 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
223 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
224
225 rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
226 rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
227 rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
228 rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
229
230 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
231 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
232 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
233 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
234
235 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
236 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
237 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
238 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
239
240 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
241 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
242 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
243 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
244
245 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
246 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
247 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
248 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
249
250 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
251 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
252 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
253 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
254
255 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
256 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
257 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
258 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
259
260 rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
261 rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
262 rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
263 rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
264
265 rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
266 rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
267}
268EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def);
269
270bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
271 u32 cmdtableidx,
272 u32 cmdtablesz,
273 enum swchnlcmd_id cmdid,
274 u32 para1, u32 para2,
275 u32 msdelay)
276{
277 struct swchnlcmd *pcmd;
278
279 if (cmdtable == NULL) {
280 RT_ASSERT(false, "cmdtable cannot be NULL.\n");
281 return false;
282 }
283
284 if (cmdtableidx >= cmdtablesz)
285 return false;
286
287 pcmd = cmdtable + cmdtableidx;
288 pcmd->cmdid = cmdid;
289 pcmd->para1 = para1;
290 pcmd->para2 = para2;
291 pcmd->msdelay = msdelay;
292 return true;
293}
294EXPORT_SYMBOL_GPL(rtl8723_phy_set_sw_chnl_cmdarray);
295
296void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
297 bool iqk_ok,
298 long result[][8],
299 u8 final_candidate,
300 bool btxonly)
301{
302 u32 oldval_0, x, tx0_a, reg;
303 long y, tx0_c;
304
305 if (final_candidate == 0xFF) {
306 return;
307 } else if (iqk_ok) {
308 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
309 MASKDWORD) >> 22) & 0x3FF;
310 x = result[final_candidate][0];
311 if ((x & 0x00000200) != 0)
312 x = x | 0xFFFFFC00;
313 tx0_a = (x * oldval_0) >> 8;
314 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
315 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
316 ((x * oldval_0 >> 7) & 0x1));
317 y = result[final_candidate][1];
318 if ((y & 0x00000200) != 0)
319 y = y | 0xFFFFFC00;
320 tx0_c = (y * oldval_0) >> 8;
321 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
322 ((tx0_c & 0x3C0) >> 6));
323 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
324 (tx0_c & 0x3F));
325 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
326 ((y * oldval_0 >> 7) & 0x1));
327 if (btxonly)
328 return;
329 reg = result[final_candidate][2];
330 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
331 reg = result[final_candidate][3] & 0x3F;
332 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
333 reg = (result[final_candidate][3] >> 6) & 0xF;
334 rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
335 }
336}
337EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_fill_iqk_matrix);
338
339void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
340 u32 *addabackup, u32 registernum)
341{
342 u32 i;
343
344 for (i = 0; i < registernum; i++)
345 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
346}
347EXPORT_SYMBOL_GPL(rtl8723_save_adda_registers);
348
349void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
350 u32 *macreg, u32 *macbackup)
351{
352 struct rtl_priv *rtlpriv = rtl_priv(hw);
353 u32 i;
354
355 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
356 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
357 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
358}
359EXPORT_SYMBOL_GPL(rtl8723_phy_save_mac_registers);
360
361void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
362 u32 *addareg, u32 *addabackup,
363 u32 regiesternum)
364{
365 u32 i;
366
367 for (i = 0; i < regiesternum; i++)
368 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
369}
370EXPORT_SYMBOL_GPL(rtl8723_phy_reload_adda_registers);
371
372void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
373 u32 *macreg, u32 *macbackup)
374{
375 struct rtl_priv *rtlpriv = rtl_priv(hw);
376 u32 i;
377
378 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
379 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
380 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
381}
382EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers);
383
384void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
385 bool is_patha_on, bool is2t)
386{
387 u32 pathon;
388 u32 i;
389
390 pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
391 if (!is2t) {
392 pathon = 0x0bdb25a0;
393 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
394 } else {
395 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
396 }
397
398 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
399 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
400}
401EXPORT_SYMBOL_GPL(rtl8723_phy_path_adda_on);
402
403void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
404 u32 *macreg, u32 *macbackup)
405{
406 struct rtl_priv *rtlpriv = rtl_priv(hw);
407 u32 i = 0;
408
409 rtl_write_byte(rtlpriv, macreg[i], 0x3F);
410
411 for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
412 rtl_write_byte(rtlpriv, macreg[i],
413 (u8) (macbackup[i] & (~BIT(3))));
414 rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
415}
416EXPORT_SYMBOL_GPL(rtl8723_phy_mac_setting_calibration);
417
418void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw)
419{
420 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
421 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
422 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
423}
424EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_standby);
425
426void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
427{
428 u32 mode;
429
430 mode = pi_mode ? 0x01000100 : 0x01000000;
431 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
432 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
433}
434EXPORT_SYMBOL_GPL(rtl8723_phy_pi_mode_switch);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
new file mode 100644
index 000000000000..83b891a9adb8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
@@ -0,0 +1,89 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2014 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#ifndef __PHY_COMMON__
27#define __PHY_COMMON__
28
29#define RT_CANNOT_IO(hw) false
30
31enum swchnlcmd_id {
32 CMDID_END,
33 CMDID_SET_TXPOWEROWER_LEVEL,
34 CMDID_BBREGWRITE10,
35 CMDID_WRITEPORT_ULONG,
36 CMDID_WRITEPORT_USHORT,
37 CMDID_WRITEPORT_UCHAR,
38 CMDID_RF_WRITEREG,
39};
40
41struct swchnlcmd {
42 enum swchnlcmd_id cmdid;
43 u32 para1;
44 u32 para2;
45 u32 msdelay;
46};
47
48u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
49 u32 regaddr, u32 bitmask);
50void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
51 u32 bitmask, u32 data);
52u32 rtl8723_phy_calculate_bit_shift(u32 bitmask);
53u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
54 enum radio_path rfpath, u32 offset);
55void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
56 enum radio_path rfpath,
57 u32 offset, u32 data);
58long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
59 enum wireless_mode wirelessmode,
60 u8 txpwridx);
61void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
62bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
63 u32 cmdtableidx,
64 u32 cmdtablesz,
65 enum swchnlcmd_id cmdid,
66 u32 para1, u32 para2,
67 u32 msdelay);
68void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
69 bool iqk_ok,
70 long result[][8],
71 u8 final_candidate,
72 bool btxonly);
73void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
74 u32 *addabackup, u32 registernum);
75void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
76 u32 *macreg, u32 *macbackup);
77void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
78 u32 *addareg, u32 *addabackup,
79 u32 regiesternum);
80void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
81 u32 *macreg, u32 *macbackup);
82void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
83 bool is_patha_on, bool is2t);
84void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
85 u32 *macreg, u32 *macbackup);
86void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw);
87void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode);
88
89#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 4933f02ce1d5..0398d3ea15b0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -410,7 +410,7 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw)
410 mac->current_ampdu_factor = 3; 410 mac->current_ampdu_factor = 3;
411 411
412 /* QOS */ 412 /* QOS */
413 rtlusb->acm_method = eAcmWay2_SW; 413 rtlusb->acm_method = EACMWAY2_SW;
414 414
415 /* IRQ */ 415 /* IRQ */
416 /* HIMR - turn all on */ 416 /* HIMR - turn all on */
@@ -994,7 +994,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
994 seq_number += 1; 994 seq_number += 1;
995 seq_number <<= 4; 995 seq_number <<= 4;
996 } 996 }
997 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb, 997 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
998 hw_queue, &tcb_desc); 998 hw_queue, &tcb_desc);
999 if (!ieee80211_has_morefrags(hdr->frame_control)) { 999 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1000 if (qc) 1000 if (qc)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 8c647391bedf..6965afdf572a 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -41,6 +41,38 @@
41#include <linux/completion.h> 41#include <linux/completion.h>
42#include "debug.h" 42#include "debug.h"
43 43
44#define MASKBYTE0 0xff
45#define MASKBYTE1 0xff00
46#define MASKBYTE2 0xff0000
47#define MASKBYTE3 0xff000000
48#define MASKHWORD 0xffff0000
49#define MASKLWORD 0x0000ffff
50#define MASKDWORD 0xffffffff
51#define MASK12BITS 0xfff
52#define MASKH4BITS 0xf0000000
53#define MASKOFDM_D 0xffc00000
54#define MASKCCK 0x3f3f3f3f
55
56#define MASK4BITS 0x0f
57#define MASK20BITS 0xfffff
58#define RFREG_OFFSET_MASK 0xfffff
59
60#define MASKBYTE0 0xff
61#define MASKBYTE1 0xff00
62#define MASKBYTE2 0xff0000
63#define MASKBYTE3 0xff000000
64#define MASKHWORD 0xffff0000
65#define MASKLWORD 0x0000ffff
66#define MASKDWORD 0xffffffff
67#define MASK12BITS 0xfff
68#define MASKH4BITS 0xf0000000
69#define MASKOFDM_D 0xffc00000
70#define MASKCCK 0x3f3f3f3f
71
72#define MASK4BITS 0x0f
73#define MASK20BITS 0xfffff
74#define RFREG_OFFSET_MASK 0xfffff
75
44#define RF_CHANGE_BY_INIT 0 76#define RF_CHANGE_BY_INIT 0
45#define RF_CHANGE_BY_IPS BIT(28) 77#define RF_CHANGE_BY_IPS BIT(28)
46#define RF_CHANGE_BY_PS BIT(29) 78#define RF_CHANGE_BY_PS BIT(29)
@@ -49,6 +81,7 @@
49 81
50#define IQK_ADDA_REG_NUM 16 82#define IQK_ADDA_REG_NUM 16
51#define IQK_MAC_REG_NUM 4 83#define IQK_MAC_REG_NUM 4
84#define IQK_THRESHOLD 8
52 85
53#define MAX_KEY_LEN 61 86#define MAX_KEY_LEN 61
54#define KEY_BUF_SIZE 5 87#define KEY_BUF_SIZE 5
@@ -86,7 +119,18 @@
86#define MAC80211_4ADDR_LEN 30 119#define MAC80211_4ADDR_LEN 30
87 120
88#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */ 121#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
122#define CHANNEL_MAX_NUMBER_2G 14
123#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
124 *"phy_GetChnlGroup8812A" and
125 * "Hal_ReadTxPowerInfo8812A"
126 */
127#define CHANNEL_MAX_NUMBER_5G_80M 7
89#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */ 128#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
129#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
130 *"phy_GetChnlGroup8812A" and
131 * "Hal_ReadTxPowerInfo8812A"
132 */
133#define CHANNEL_MAX_NUMBER_5G_80M 7
90#define MAX_PG_GROUP 13 134#define MAX_PG_GROUP 13
91#define CHANNEL_GROUP_MAX_2G 3 135#define CHANNEL_GROUP_MAX_2G 3
92#define CHANNEL_GROUP_IDX_5GL 3 136#define CHANNEL_GROUP_IDX_5GL 3
@@ -96,6 +140,7 @@
96#define CHANNEL_MAX_NUMBER_2G 14 140#define CHANNEL_MAX_NUMBER_2G 14
97#define AVG_THERMAL_NUM 8 141#define AVG_THERMAL_NUM 8
98#define AVG_THERMAL_NUM_88E 4 142#define AVG_THERMAL_NUM_88E 4
143#define AVG_THERMAL_NUM_8723BE 4
99#define MAX_TID_COUNT 9 144#define MAX_TID_COUNT 9
100 145
101/* for early mode */ 146/* for early mode */
@@ -107,6 +152,24 @@
107#define MAX_CHNL_GROUP_24G 6 152#define MAX_CHNL_GROUP_24G 6
108#define MAX_CHNL_GROUP_5G 14 153#define MAX_CHNL_GROUP_5G 14
109 154
155#define TX_PWR_BY_RATE_NUM_BAND 2
156#define TX_PWR_BY_RATE_NUM_RF 4
157#define TX_PWR_BY_RATE_NUM_SECTION 12
158#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
159#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
160
161#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
162
163#define DEL_SW_IDX_SZ 30
164#define BAND_NUM 3
165
166enum rf_tx_num {
167 RF_1TX = 0,
168 RF_2TX,
169 RF_MAX_TX_NUM,
170 RF_TX_NUM_NONIMPLEMENT,
171};
172
110struct txpower_info_2g { 173struct txpower_info_2g {
111 u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; 174 u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
112 u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; 175 u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -115,6 +178,8 @@ struct txpower_info_2g {
115 u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT]; 178 u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
116 u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT]; 179 u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
117 u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT]; 180 u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
181 u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
182 u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
118}; 183};
119 184
120struct txpower_info_5g { 185struct txpower_info_5g {
@@ -123,6 +188,17 @@ struct txpower_info_5g {
123 u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT]; 188 u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
124 u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT]; 189 u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
125 u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT]; 190 u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
191 u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
192 u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
193};
194
195enum rate_section {
196 CCK = 0,
197 OFDM,
198 HT_MCS0_MCS7,
199 HT_MCS8_MCS15,
200 VHT_1SSMCS0_1SSMCS9,
201 VHT_2SSMCS0_2SSMCS9,
126}; 202};
127 203
128enum intf_type { 204enum intf_type {
@@ -158,7 +234,10 @@ enum hardware_type {
158 HARDWARE_TYPE_RTL8192DU, 234 HARDWARE_TYPE_RTL8192DU,
159 HARDWARE_TYPE_RTL8723AE, 235 HARDWARE_TYPE_RTL8723AE,
160 HARDWARE_TYPE_RTL8723U, 236 HARDWARE_TYPE_RTL8723U,
237 HARDWARE_TYPE_RTL8723BE,
161 HARDWARE_TYPE_RTL8188EE, 238 HARDWARE_TYPE_RTL8188EE,
239 HARDWARE_TYPE_RTL8821AE,
240 HARDWARE_TYPE_RTL8812AE,
162 241
163 /* keep it last */ 242 /* keep it last */
164 HARDWARE_TYPE_NUM 243 HARDWARE_TYPE_NUM
@@ -195,8 +274,16 @@ enum hardware_type {
195 _pdesc->rxmcs == DESC92_RATE5_5M || \ 274 _pdesc->rxmcs == DESC92_RATE5_5M || \
196 _pdesc->rxmcs == DESC92_RATE11M) 275 _pdesc->rxmcs == DESC92_RATE11M)
197 276
277#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
278 ((rxmcs) == DESC92_RATE1M || \
279 (rxmcs) == DESC92_RATE2M || \
280 (rxmcs) == DESC92_RATE5_5M || \
281 (rxmcs) == DESC92_RATE11M)
282
198enum scan_operation_backup_opt { 283enum scan_operation_backup_opt {
199 SCAN_OPT_BACKUP = 0, 284 SCAN_OPT_BACKUP = 0,
285 SCAN_OPT_BACKUP_BAND0 = 0,
286 SCAN_OPT_BACKUP_BAND1,
200 SCAN_OPT_RESTORE, 287 SCAN_OPT_RESTORE,
201 SCAN_OPT_MAX 288 SCAN_OPT_MAX
202}; 289};
@@ -231,7 +318,9 @@ struct bb_reg_def {
231 318
232enum io_type { 319enum io_type {
233 IO_CMD_PAUSE_DM_BY_SCAN = 0, 320 IO_CMD_PAUSE_DM_BY_SCAN = 0,
234 IO_CMD_RESUME_DM_BY_SCAN = 1, 321 IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
322 IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
323 IO_CMD_RESUME_DM_BY_SCAN = 2,
235}; 324};
236 325
237enum hw_variables { 326enum hw_variables {
@@ -298,6 +387,7 @@ enum hw_variables {
298 HW_VAR_SET_RPWM, 387 HW_VAR_SET_RPWM,
299 HW_VAR_H2C_FW_PWRMODE, 388 HW_VAR_H2C_FW_PWRMODE,
300 HW_VAR_H2C_FW_JOINBSSRPT, 389 HW_VAR_H2C_FW_JOINBSSRPT,
390 HW_VAR_H2C_FW_MEDIASTATUSRPT,
301 HW_VAR_H2C_FW_P2P_PS_OFFLOAD, 391 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
302 HW_VAR_FW_PSMODE_STATUS, 392 HW_VAR_FW_PSMODE_STATUS,
303 HW_VAR_RESUME_CLK_ON, 393 HW_VAR_RESUME_CLK_ON,
@@ -330,6 +420,8 @@ enum hw_variables {
330 420
331 HAL_DEF_WOWLAN, 421 HAL_DEF_WOWLAN,
332 HW_VAR_MRC, 422 HW_VAR_MRC,
423 HW_VAR_KEEP_ALIVE,
424 HW_VAR_NAV_UPPER,
333 425
334 HW_VAR_MGT_FILTER, 426 HW_VAR_MGT_FILTER,
335 HW_VAR_CTRL_FILTER, 427 HW_VAR_CTRL_FILTER,
@@ -348,34 +440,34 @@ enum rt_oem_id {
348 RT_CID_8187_HW_LED = 3, 440 RT_CID_8187_HW_LED = 3,
349 RT_CID_8187_NETGEAR = 4, 441 RT_CID_8187_NETGEAR = 4,
350 RT_CID_WHQL = 5, 442 RT_CID_WHQL = 5,
351 RT_CID_819x_CAMEO = 6, 443 RT_CID_819X_CAMEO = 6,
352 RT_CID_819x_RUNTOP = 7, 444 RT_CID_819X_RUNTOP = 7,
353 RT_CID_819x_Senao = 8, 445 RT_CID_819X_SENAO = 8,
354 RT_CID_TOSHIBA = 9, 446 RT_CID_TOSHIBA = 9,
355 RT_CID_819x_Netcore = 10, 447 RT_CID_819X_NETCORE = 10,
356 RT_CID_Nettronix = 11, 448 RT_CID_NETTRONIX = 11,
357 RT_CID_DLINK = 12, 449 RT_CID_DLINK = 12,
358 RT_CID_PRONET = 13, 450 RT_CID_PRONET = 13,
359 RT_CID_COREGA = 14, 451 RT_CID_COREGA = 14,
360 RT_CID_819x_ALPHA = 15, 452 RT_CID_819X_ALPHA = 15,
361 RT_CID_819x_Sitecom = 16, 453 RT_CID_819X_SITECOM = 16,
362 RT_CID_CCX = 17, 454 RT_CID_CCX = 17,
363 RT_CID_819x_Lenovo = 18, 455 RT_CID_819X_LENOVO = 18,
364 RT_CID_819x_QMI = 19, 456 RT_CID_819X_QMI = 19,
365 RT_CID_819x_Edimax_Belkin = 20, 457 RT_CID_819X_EDIMAX_BELKIN = 20,
366 RT_CID_819x_Sercomm_Belkin = 21, 458 RT_CID_819X_SERCOMM_BELKIN = 21,
367 RT_CID_819x_CAMEO1 = 22, 459 RT_CID_819X_CAMEO1 = 22,
368 RT_CID_819x_MSI = 23, 460 RT_CID_819X_MSI = 23,
369 RT_CID_819x_Acer = 24, 461 RT_CID_819X_ACER = 24,
370 RT_CID_819x_HP = 27, 462 RT_CID_819X_HP = 27,
371 RT_CID_819x_CLEVO = 28, 463 RT_CID_819X_CLEVO = 28,
372 RT_CID_819x_Arcadyan_Belkin = 29, 464 RT_CID_819X_ARCADYAN_BELKIN = 29,
373 RT_CID_819x_SAMSUNG = 30, 465 RT_CID_819X_SAMSUNG = 30,
374 RT_CID_819x_WNC_COREGA = 31, 466 RT_CID_819X_WNC_COREGA = 31,
375 RT_CID_819x_Foxcoon = 32, 467 RT_CID_819X_FOXCOON = 32,
376 RT_CID_819x_DELL = 33, 468 RT_CID_819X_DELL = 33,
377 RT_CID_819x_PRONETS = 34, 469 RT_CID_819X_PRONETS = 34,
378 RT_CID_819x_Edimax_ASUS = 35, 470 RT_CID_819X_EDIMAX_ASUS = 35,
379 RT_CID_NETGEAR = 36, 471 RT_CID_NETGEAR = 36,
380 RT_CID_PLANEX = 37, 472 RT_CID_PLANEX = 37,
381 RT_CID_CC_C = 38, 473 RT_CID_CC_C = 38,
@@ -389,6 +481,7 @@ enum hw_descs {
389 HW_DESC_RXBUFF_ADDR, 481 HW_DESC_RXBUFF_ADDR,
390 HW_DESC_RXPKT_LEN, 482 HW_DESC_RXPKT_LEN,
391 HW_DESC_RXERO, 483 HW_DESC_RXERO,
484 HW_DESC_RX_PREPARE,
392}; 485};
393 486
394enum prime_sc { 487enum prime_sc {
@@ -407,6 +500,7 @@ enum rf_type {
407enum ht_channel_width { 500enum ht_channel_width {
408 HT_CHANNEL_WIDTH_20 = 0, 501 HT_CHANNEL_WIDTH_20 = 0,
409 HT_CHANNEL_WIDTH_20_40 = 1, 502 HT_CHANNEL_WIDTH_20_40 = 1,
503 HT_CHANNEL_WIDTH_80 = 2,
410}; 504};
411 505
412/* Ref: 802.11i sepc D10.0 7.3.2.25.1 506/* Ref: 802.11i sepc D10.0 7.3.2.25.1
@@ -471,6 +565,9 @@ enum rtl_var_map {
471 MAC_RCR_ACRC32, 565 MAC_RCR_ACRC32,
472 MAC_RCR_ACF, 566 MAC_RCR_ACF,
473 MAC_RCR_AAP, 567 MAC_RCR_AAP,
568 MAC_HIMR,
569 MAC_HIMRE,
570 MAC_HSISR,
474 571
475 /*efuse map */ 572 /*efuse map */
476 EFUSE_TEST, 573 EFUSE_TEST,
@@ -608,7 +705,7 @@ enum rtl_led_pin {
608enum acm_method { 705enum acm_method {
609 eAcmWay0_SwAndHw = 0, 706 eAcmWay0_SwAndHw = 0,
610 eAcmWay1_HW = 1, 707 eAcmWay1_HW = 1,
611 eAcmWay2_SW = 2, 708 EACMWAY2_SW = 2,
612}; 709};
613 710
614enum macphy_mode { 711enum macphy_mode {
@@ -645,7 +742,9 @@ enum wireless_mode {
645 WIRELESS_MODE_G = 0x04, 742 WIRELESS_MODE_G = 0x04,
646 WIRELESS_MODE_AUTO = 0x08, 743 WIRELESS_MODE_AUTO = 0x08,
647 WIRELESS_MODE_N_24G = 0x10, 744 WIRELESS_MODE_N_24G = 0x10,
648 WIRELESS_MODE_N_5G = 0x20 745 WIRELESS_MODE_N_5G = 0x20,
746 WIRELESS_MODE_AC_5G = 0x40,
747 WIRELESS_MODE_AC_24G = 0x80
649}; 748};
650 749
651#define IS_WIRELESS_MODE_A(wirelessmode) \ 750#define IS_WIRELESS_MODE_A(wirelessmode) \
@@ -669,6 +768,8 @@ enum ratr_table_mode {
669 RATR_INX_WIRELESS_B = 6, 768 RATR_INX_WIRELESS_B = 6,
670 RATR_INX_WIRELESS_MC = 7, 769 RATR_INX_WIRELESS_MC = 7,
671 RATR_INX_WIRELESS_A = 8, 770 RATR_INX_WIRELESS_A = 8,
771 RATR_INX_WIRELESS_AC_5N = 8,
772 RATR_INX_WIRELESS_AC_24N = 9,
672}; 773};
673 774
674enum rtl_link_state { 775enum rtl_link_state {
@@ -803,8 +904,12 @@ struct wireless_stats {
803 long signal_strength; 904 long signal_strength;
804 905
805 u8 rx_rssi_percentage[4]; 906 u8 rx_rssi_percentage[4];
907 u8 rx_evm_dbm[4];
806 u8 rx_evm_percentage[2]; 908 u8 rx_evm_percentage[2];
807 909
910 u16 rx_cfo_short[4];
911 u16 rx_cfo_tail[4];
912
808 struct rt_smooth_data ui_rssi; 913 struct rt_smooth_data ui_rssi;
809 struct rt_smooth_data ui_link_quality; 914 struct rt_smooth_data ui_link_quality;
810}; 915};
@@ -817,9 +922,9 @@ struct rate_adaptive {
817 u32 high_rssi_thresh_for_ra; 922 u32 high_rssi_thresh_for_ra;
818 u32 high2low_rssi_thresh_for_ra; 923 u32 high2low_rssi_thresh_for_ra;
819 u8 low2high_rssi_thresh_for_ra40m; 924 u8 low2high_rssi_thresh_for_ra40m;
820 u32 low_rssi_thresh_for_ra40M; 925 u32 low_rssi_thresh_for_ra40m;
821 u8 low2high_rssi_thresh_for_ra20m; 926 u8 low2high_rssi_thresh_for_ra20m;
822 u32 low_rssi_thresh_for_ra20M; 927 u32 low_rssi_thresh_for_ra20m;
823 u32 upper_rssi_threshold_ratr; 928 u32 upper_rssi_threshold_ratr;
824 u32 middleupper_rssi_threshold_ratr; 929 u32 middleupper_rssi_threshold_ratr;
825 u32 middle_rssi_threshold_ratr; 930 u32 middle_rssi_threshold_ratr;
@@ -833,6 +938,10 @@ struct rate_adaptive {
833 u32 ping_rssi_thresh_for_ra; 938 u32 ping_rssi_thresh_for_ra;
834 u32 last_ratr; 939 u32 last_ratr;
835 u8 pre_ratr_state; 940 u8 pre_ratr_state;
941 u8 ldpc_thres;
942 bool use_ldpc;
943 bool lower_rts_rate;
944 bool is_special_data;
836}; 945};
837 946
838struct regd_pair_mapping { 947struct regd_pair_mapping {
@@ -841,6 +950,16 @@ struct regd_pair_mapping {
841 u16 reg_2ghz_ctl; 950 u16 reg_2ghz_ctl;
842}; 951};
843 952
953struct dynamic_primary_cca {
954 u8 pricca_flag;
955 u8 intf_flag;
956 u8 intf_type;
957 u8 dup_rts_flag;
958 u8 monitor_flag;
959 u8 ch_offset;
960 u8 mf_state;
961};
962
844struct rtl_regulatory { 963struct rtl_regulatory {
845 char alpha2[2]; 964 char alpha2[2];
846 u16 country_code; 965 u16 country_code;
@@ -976,16 +1095,29 @@ struct rtl_phy {
976 u32 iqk_bb_backup[10]; 1095 u32 iqk_bb_backup[10];
977 bool iqk_initialized; 1096 bool iqk_initialized;
978 1097
1098 bool rfpath_rx_enable[MAX_RF_PATH];
1099 u8 reg_837;
979 /* Dual mac */ 1100 /* Dual mac */
980 bool need_iqk; 1101 bool need_iqk;
981 struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM]; 1102 struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
982 1103
983 bool rfpi_enable; 1104 bool rfpi_enable;
1105 bool iqk_in_progress;
984 1106
985 u8 pwrgroup_cnt; 1107 u8 pwrgroup_cnt;
986 u8 cck_high_power; 1108 u8 cck_high_power;
987 /* MAX_PG_GROUP groups of pwr diff by rates */ 1109 /* MAX_PG_GROUP groups of pwr diff by rates */
988 u32 mcs_offset[MAX_PG_GROUP][16]; 1110 u32 mcs_offset[MAX_PG_GROUP][16];
1111 u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
1112 [TX_PWR_BY_RATE_NUM_RF]
1113 [TX_PWR_BY_RATE_NUM_RF]
1114 [TX_PWR_BY_RATE_NUM_SECTION];
1115 u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
1116 [TX_PWR_BY_RATE_NUM_RF]
1117 [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
1118 u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
1119 [TX_PWR_BY_RATE_NUM_RF]
1120 [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
989 u8 default_initialgain[4]; 1121 u8 default_initialgain[4];
990 1122
991 /* the current Tx power level */ 1123 /* the current Tx power level */
@@ -998,6 +1130,7 @@ struct rtl_phy {
998 bool apk_done; 1130 bool apk_done;
999 u32 reg_rf3c[2]; /* pathA / pathB */ 1131 u32 reg_rf3c[2]; /* pathA / pathB */
1000 1132
1133 u32 backup_rf_0x1a;/*92ee*/
1001 /* bfsync */ 1134 /* bfsync */
1002 u8 framesync; 1135 u8 framesync;
1003 u32 framesync_c34; 1136 u32 framesync_c34;
@@ -1006,6 +1139,7 @@ struct rtl_phy {
1006 struct phy_parameters hwparam_tables[MAX_TAB]; 1139 struct phy_parameters hwparam_tables[MAX_TAB];
1007 u16 rf_pathmap; 1140 u16 rf_pathmap;
1008 1141
1142 u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
1009 enum rt_polarity_ctl polarity_ctl; 1143 enum rt_polarity_ctl polarity_ctl;
1010}; 1144};
1011 1145
@@ -1133,6 +1267,7 @@ struct rtl_mac {
1133 u8 use_cts_protect; 1267 u8 use_cts_protect;
1134 u8 cur_40_prime_sc; 1268 u8 cur_40_prime_sc;
1135 u8 cur_40_prime_sc_bk; 1269 u8 cur_40_prime_sc_bk;
1270 u8 cur_80_prime_sc;
1136 u64 tsf; 1271 u64 tsf;
1137 u8 retry_short; 1272 u8 retry_short;
1138 u8 retry_long; 1273 u8 retry_long;
@@ -1213,6 +1348,7 @@ struct rtl_hal {
1213 bool being_init_adapter; 1348 bool being_init_adapter;
1214 bool bbrf_ready; 1349 bool bbrf_ready;
1215 bool mac_func_enable; 1350 bool mac_func_enable;
1351 bool pre_edcca_enable;
1216 struct bt_coexist_8723 hal_coex_8723; 1352 struct bt_coexist_8723 hal_coex_8723;
1217 1353
1218 enum intf_type interface; 1354 enum intf_type interface;
@@ -1234,6 +1370,7 @@ struct rtl_hal {
1234 /*Reserve page start offset except beacon in TxQ. */ 1370 /*Reserve page start offset except beacon in TxQ. */
1235 u8 fw_rsvdpage_startoffset; 1371 u8 fw_rsvdpage_startoffset;
1236 u8 h2c_txcmd_seq; 1372 u8 h2c_txcmd_seq;
1373 u8 current_ra_rate;
1237 1374
1238 /* FW Cmd IO related */ 1375 /* FW Cmd IO related */
1239 u16 fwcmd_iomap; 1376 u16 fwcmd_iomap;
@@ -1273,6 +1410,9 @@ struct rtl_hal {
1273 bool disable_amsdu_8k; 1410 bool disable_amsdu_8k;
1274 bool master_of_dmsp; 1411 bool master_of_dmsp;
1275 bool slave_of_dmsp; 1412 bool slave_of_dmsp;
1413
1414 u16 rx_tag;/*for 92ee*/
1415 u8 rts_en;
1276}; 1416};
1277 1417
1278struct rtl_security { 1418struct rtl_security {
@@ -1321,6 +1461,16 @@ struct fast_ant_training {
1321 bool becomelinked; 1461 bool becomelinked;
1322}; 1462};
1323 1463
1464struct dm_phy_dbg_info {
1465 char rx_snrdb[4];
1466 u64 num_qry_phy_status;
1467 u64 num_qry_phy_status_cck;
1468 u64 num_qry_phy_status_ofdm;
1469 u16 num_qry_beacon_pkt;
1470 u16 num_non_be_pkt;
1471 s32 rx_evm[4];
1472};
1473
1324struct rtl_dm { 1474struct rtl_dm {
1325 /*PHY status for Dynamic Management */ 1475 /*PHY status for Dynamic Management */
1326 long entry_min_undec_sm_pwdb; 1476 long entry_min_undec_sm_pwdb;
@@ -1360,29 +1510,84 @@ struct rtl_dm {
1360 u8 txpower_track_control; 1510 u8 txpower_track_control;
1361 bool interrupt_migration; 1511 bool interrupt_migration;
1362 bool disable_tx_int; 1512 bool disable_tx_int;
1363 char ofdm_index[2]; 1513 char ofdm_index[MAX_RF_PATH];
1514 u8 default_ofdm_index;
1515 u8 default_cck_index;
1364 char cck_index; 1516 char cck_index;
1365 char delta_power_index; 1517 char delta_power_index[MAX_RF_PATH];
1366 char delta_power_index_last; 1518 char delta_power_index_last[MAX_RF_PATH];
1367 char power_index_offset; 1519 char power_index_offset[MAX_RF_PATH];
1520 char absolute_ofdm_swing_idx[MAX_RF_PATH];
1521 char remnant_ofdm_swing_idx[MAX_RF_PATH];
1522 char remnant_cck_idx;
1523 bool modify_txagc_flag_path_a;
1524 bool modify_txagc_flag_path_b;
1525
1526 bool one_entry_only;
1527 struct dm_phy_dbg_info dbginfo;
1528
1529 /* Dynamic ATC switch */
1530 bool atc_status;
1531 bool large_cfo_hit;
1532 bool is_freeze;
1533 int cfo_tail[2];
1534 int cfo_ave_pre;
1535 int crystal_cap;
1536 u8 cfo_threshold;
1537 u32 packet_count;
1538 u32 packet_count_pre;
1539 u8 tx_rate;
1368 1540
1369 /*88e tx power tracking*/ 1541 /*88e tx power tracking*/
1370 u8 swing_idx_ofdm[2]; 1542 u8 swing_idx_ofdm[MAX_RF_PATH];
1371 u8 swing_idx_ofdm_cur; 1543 u8 swing_idx_ofdm_cur;
1372 u8 swing_idx_ofdm_base; 1544 u8 swing_idx_ofdm_base[MAX_RF_PATH];
1373 bool swing_flag_ofdm; 1545 bool swing_flag_ofdm;
1374 u8 swing_idx_cck; 1546 u8 swing_idx_cck;
1375 u8 swing_idx_cck_cur; 1547 u8 swing_idx_cck_cur;
1376 u8 swing_idx_cck_base; 1548 u8 swing_idx_cck_base;
1377 bool swing_flag_cck; 1549 bool swing_flag_cck;
1378 1550
1551 char swing_diff_2g;
1552 char swing_diff_5g;
1553
1554 u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ];
1555 u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ];
1556 u8 delta_swing_table_idx_24gcckb_p[DEL_SW_IDX_SZ];
1557 u8 delta_swing_table_idx_24gcckb_n[DEL_SW_IDX_SZ];
1558 u8 delta_swing_table_idx_24ga_p[DEL_SW_IDX_SZ];
1559 u8 delta_swing_table_idx_24ga_n[DEL_SW_IDX_SZ];
1560 u8 delta_swing_table_idx_24gb_p[DEL_SW_IDX_SZ];
1561 u8 delta_swing_table_idx_24gb_n[DEL_SW_IDX_SZ];
1562 u8 delta_swing_table_idx_5ga_p[BAND_NUM][DEL_SW_IDX_SZ];
1563 u8 delta_swing_table_idx_5ga_n[BAND_NUM][DEL_SW_IDX_SZ];
1564 u8 delta_swing_table_idx_5gb_p[BAND_NUM][DEL_SW_IDX_SZ];
1565 u8 delta_swing_table_idx_5gb_n[BAND_NUM][DEL_SW_IDX_SZ];
1566 u8 delta_swing_table_idx_24ga_p_8188e[DEL_SW_IDX_SZ];
1567 u8 delta_swing_table_idx_24ga_n_8188e[DEL_SW_IDX_SZ];
1568
1379 /* DMSP */ 1569 /* DMSP */
1380 bool supp_phymode_switch; 1570 bool supp_phymode_switch;
1381 1571
1572 /* DulMac */
1382 struct fast_ant_training fat_table; 1573 struct fast_ant_training fat_table;
1574
1575 u8 resp_tx_path;
1576 u8 path_sel;
1577 u32 patha_sum;
1578 u32 pathb_sum;
1579 u32 patha_cnt;
1580 u32 pathb_cnt;
1581
1582 u8 pre_channel;
1583 u8 *p_channel;
1584 u8 linked_interval;
1585
1586 u64 last_tx_ok_cnt;
1587 u64 last_rx_ok_cnt;
1383}; 1588};
1384 1589
1385#define EFUSE_MAX_LOGICAL_SIZE 256 1590#define EFUSE_MAX_LOGICAL_SIZE 512
1386 1591
1387struct rtl_efuse { 1592struct rtl_efuse {
1388 bool autoLoad_ok; 1593 bool autoLoad_ok;
@@ -1422,12 +1627,9 @@ struct rtl_efuse {
1422 u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */ 1627 u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
1423 u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX]; 1628 u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
1424 u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX]; 1629 u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
1425 u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G]; 1630 u8 eeprom_chnlarea_txpwr_cck[MAX_RF_PATH][CHANNEL_GROUP_MAX_2G];
1426 u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX]; 1631 u8 eeprom_chnlarea_txpwr_ht40_1s[MAX_RF_PATH][CHANNEL_GROUP_MAX];
1427 u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX]; 1632 u8 eprom_chnl_txpwr_ht40_2sdf[MAX_RF_PATH][CHANNEL_GROUP_MAX];
1428 u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
1429 u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1430 u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1431 1633
1432 u8 internal_pa_5g[2]; /* pathA / pathB */ 1634 u8 internal_pa_5g[2]; /* pathA / pathB */
1433 u8 eeprom_c9; 1635 u8 eeprom_c9;
@@ -1438,9 +1640,38 @@ struct rtl_efuse {
1438 u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER]; 1640 u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
1439 u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER]; 1641 u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
1440 1642
1441 char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */ 1643 u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
1442 /*For HT<->legacy pwr diff*/ 1644 /*For HT 40MHZ pwr */
1443 u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER]; 1645 u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1646 /*For HT 40MHZ pwr */
1647 u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1648
1649 /*--------------------------------------------------------*
1650 * 8192CE\8192SE\8192DE\8723AE use the following 4 arrays,
1651 * other ICs (8188EE\8723BE\8192EE\8812AE...)
1652 * define new arrays in Windows code.
1653 * BUT, in linux code, we use the same array for all ICs.
1654 *
1655 * The Correspondance relation between two arrays is:
1656 * txpwr_cckdiff[][] == CCK_24G_Diff[][]
1657 * txpwr_ht20diff[][] == BW20_24G_Diff[][]
1658 * txpwr_ht40diff[][] == BW40_24G_Diff[][]
1659 * txpwr_legacyhtdiff[][] == OFDM_24G_Diff[][]
1660 *
1661 * Sizes of these arrays are decided by the larger ones.
1662 */
1663 char txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1664 char txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1665 char txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1666 char txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1667
1668 u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
1669 u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
1670 char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
1671 char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
1672 char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
1673 char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
1674
1444 u8 txpwr_safetyflag; /* Band edge enable flag */ 1675 u8 txpwr_safetyflag; /* Band edge enable flag */
1445 u16 eeprom_txpowerdiff; 1676 u16 eeprom_txpowerdiff;
1446 u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */ 1677 u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
@@ -1571,7 +1802,9 @@ struct rtl_stats {
1571 bool rx_is40Mhzpacket; 1802 bool rx_is40Mhzpacket;
1572 u32 rx_pwdb_all; 1803 u32 rx_pwdb_all;
1573 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ 1804 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
1574 s8 rx_mimo_sig_qual[2]; 1805 s8 rx_mimo_sig_qual[4];
1806 u8 rx_pwr[4]; /* per-path's pwdb */
1807 u8 rx_snr[4]; /* per-path's SNR */
1575 bool packet_matchbssid; 1808 bool packet_matchbssid;
1576 bool is_cck; 1809 bool is_cck;
1577 bool is_ht; 1810 bool is_ht;
@@ -1644,6 +1877,8 @@ struct rtl_tcb_desc {
1644 bool btx_enable_sw_calc_duration; 1877 bool btx_enable_sw_calc_duration;
1645}; 1878};
1646 1879
1880struct rtl92c_firmware_header;
1881
1647struct rtl_hal_ops { 1882struct rtl_hal_ops {
1648 int (*init_sw_vars) (struct ieee80211_hw *hw); 1883 int (*init_sw_vars) (struct ieee80211_hw *hw);
1649 void (*deinit_sw_vars) (struct ieee80211_hw *hw); 1884 void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -1673,9 +1908,17 @@ struct rtl_hal_ops {
1673 void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); 1908 void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
1674 void (*update_rate_tbl) (struct ieee80211_hw *hw, 1909 void (*update_rate_tbl) (struct ieee80211_hw *hw,
1675 struct ieee80211_sta *sta, u8 rssi_level); 1910 struct ieee80211_sta *sta, u8 rssi_level);
1911 void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
1912 u8 *desc, u8 queue_index,
1913 struct sk_buff *skb, dma_addr_t addr);
1676 void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level); 1914 void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
1915 u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
1916 u8 queue_index);
1917 void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
1918 u8 queue_index);
1677 void (*fill_tx_desc) (struct ieee80211_hw *hw, 1919 void (*fill_tx_desc) (struct ieee80211_hw *hw,
1678 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1920 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1921 u8 *pbd_desc_tx,
1679 struct ieee80211_tx_info *info, 1922 struct ieee80211_tx_info *info,
1680 struct ieee80211_sta *sta, 1923 struct ieee80211_sta *sta,
1681 struct sk_buff *skb, u8 hw_queue, 1924 struct sk_buff *skb, u8 hw_queue,
@@ -1698,8 +1941,11 @@ struct rtl_hal_ops {
1698 enum rf_pwrstate rfpwr_state); 1941 enum rf_pwrstate rfpwr_state);
1699 void (*led_control) (struct ieee80211_hw *hw, 1942 void (*led_control) (struct ieee80211_hw *hw,
1700 enum led_ctl_mode ledaction); 1943 enum led_ctl_mode ledaction);
1701 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val); 1944 void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
1945 u8 desc_name, u8 *val);
1702 u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name); 1946 u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
1947 bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
1948 u8 hw_queue, u16 index);
1703 void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue); 1949 void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
1704 void (*enable_hw_sec) (struct ieee80211_hw *hw); 1950 void (*enable_hw_sec) (struct ieee80211_hw *hw);
1705 void (*set_key) (struct ieee80211_hw *hw, u32 key_index, 1951 void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1738,6 +1984,10 @@ struct rtl_hal_ops {
1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); 1984 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
1739 void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, 1985 void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
1740 u32 cmd_len, u8 *p_cmdbuffer); 1986 u32 cmd_len, u8 *p_cmdbuffer);
1987 bool (*get_btc_status) (void);
1988 bool (*is_fw_header) (struct rtl92c_firmware_header *hdr);
1989 u32 (*rx_command_packet)(struct ieee80211_hw *hw,
1990 struct rtl_stats status, struct sk_buff *skb);
1741}; 1991};
1742 1992
1743struct rtl_intf_ops { 1993struct rtl_intf_ops {
@@ -1847,6 +2097,8 @@ struct rtl_locks {
1847 2097
1848 /*Easy concurrent*/ 2098 /*Easy concurrent*/
1849 spinlock_t check_sendpkt_lock; 2099 spinlock_t check_sendpkt_lock;
2100
2101 spinlock_t iqk_lock;
1850}; 2102};
1851 2103
1852struct rtl_works { 2104struct rtl_works {
@@ -1915,6 +2167,7 @@ struct ps_t {
1915 u8 cur_ccasate; 2167 u8 cur_ccasate;
1916 u8 pre_rfstate; 2168 u8 pre_rfstate;
1917 u8 cur_rfstate; 2169 u8 cur_rfstate;
2170 u8 initialize;
1918 long rssi_val_min; 2171 long rssi_val_min;
1919}; 2172};
1920 2173
@@ -1939,6 +2192,7 @@ struct dig_t {
1939 u8 cursta_cstate; 2192 u8 cursta_cstate;
1940 u8 presta_cstate; 2193 u8 presta_cstate;
1941 u8 curmultista_cstate; 2194 u8 curmultista_cstate;
2195 u8 stop_dig;
1942 char back_val; 2196 char back_val;
1943 char back_range_max; 2197 char back_range_max;
1944 char back_range_min; 2198 char back_range_min;
@@ -1956,6 +2210,7 @@ struct dig_t {
1956 u8 cur_ccasate; 2210 u8 cur_ccasate;
1957 u8 large_fa_hit; 2211 u8 large_fa_hit;
1958 u8 dig_dynamic_min; 2212 u8 dig_dynamic_min;
2213 u8 dig_dynamic_min_1;
1959 u8 forbidden_igi; 2214 u8 forbidden_igi;
1960 u8 dig_state; 2215 u8 dig_state;
1961 u8 dig_highpwrstate; 2216 u8 dig_highpwrstate;
@@ -1972,6 +2227,7 @@ struct dig_t {
1972 char backoffval_range_min; 2227 char backoffval_range_min;
1973 u8 dig_min_0; 2228 u8 dig_min_0;
1974 u8 dig_min_1; 2229 u8 dig_min_1;
2230 u8 bt30_cur_igi;
1975 bool media_connect_0; 2231 bool media_connect_0;
1976 bool media_connect_1; 2232 bool media_connect_1;
1977 2233
@@ -1986,6 +2242,96 @@ struct rtl_global_var {
1986 spinlock_t glb_list_lock; 2242 spinlock_t glb_list_lock;
1987}; 2243};
1988 2244
2245struct rtl_btc_info {
2246 u8 bt_type;
2247 u8 btcoexist;
2248 u8 ant_num;
2249};
2250
2251struct bt_coexist_info {
2252 struct rtl_btc_ops *btc_ops;
2253 struct rtl_btc_info btc_info;
2254 /* EEPROM BT info. */
2255 u8 eeprom_bt_coexist;
2256 u8 eeprom_bt_type;
2257 u8 eeprom_bt_ant_num;
2258 u8 eeprom_bt_ant_isol;
2259 u8 eeprom_bt_radio_shared;
2260
2261 u8 bt_coexistence;
2262 u8 bt_ant_num;
2263 u8 bt_coexist_type;
2264 u8 bt_state;
2265 u8 bt_cur_state; /* 0:on, 1:off */
2266 u8 bt_ant_isolation; /* 0:good, 1:bad */
2267 u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
2268 u8 bt_service;
2269 u8 bt_radio_shared_type;
2270 u8 bt_rfreg_origin_1e;
2271 u8 bt_rfreg_origin_1f;
2272 u8 bt_rssi_state;
2273 u32 ratio_tx;
2274 u32 ratio_pri;
2275 u32 bt_edca_ul;
2276 u32 bt_edca_dl;
2277
2278 bool init_set;
2279 bool bt_busy_traffic;
2280 bool bt_traffic_mode_set;
2281 bool bt_non_traffic_mode_set;
2282
2283 bool fw_coexist_all_off;
2284 bool sw_coexist_all_off;
2285 bool hw_coexist_all_off;
2286 u32 cstate;
2287 u32 previous_state;
2288 u32 cstate_h;
2289 u32 previous_state_h;
2290
2291 u8 bt_pre_rssi_state;
2292 u8 bt_pre_rssi_state1;
2293
2294 u8 reg_bt_iso;
2295 u8 reg_bt_sco;
2296 bool balance_on;
2297 u8 bt_active_zero_cnt;
2298 bool cur_bt_disabled;
2299 bool pre_bt_disabled;
2300
2301 u8 bt_profile_case;
2302 u8 bt_profile_action;
2303 bool bt_busy;
2304 bool hold_for_bt_operation;
2305 u8 lps_counter;
2306};
2307
2308struct rtl_btc_ops {
2309 void (*btc_init_variables) (struct rtl_priv *rtlpriv);
2310 void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
2311 void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
2312 void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
2313 void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
2314 void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
2315 void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
2316 enum _RT_MEDIA_STATUS mstatus);
2317 void (*btc_periodical) (struct rtl_priv *rtlpriv);
2318 void (*btc_halt_notify) (void);
2319 void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
2320 u8 *tmp_buf, u8 length);
2321 bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
2322 bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
2323 bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
2324};
2325
2326struct proxim {
2327 bool proxim_on;
2328
2329 void *proximity_priv;
2330 int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
2331 struct sk_buff *skb);
2332 u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
2333};
2334
1989struct rtl_priv { 2335struct rtl_priv {
1990 struct ieee80211_hw *hw; 2336 struct ieee80211_hw *hw;
1991 struct completion firmware_loading_complete; 2337 struct completion firmware_loading_complete;
@@ -2008,6 +2354,7 @@ struct rtl_priv {
2008 2354
2009 struct rtl_ps_ctl psc; 2355 struct rtl_ps_ctl psc;
2010 struct rate_adaptive ra; 2356 struct rate_adaptive ra;
2357 struct dynamic_primary_cca primarycca;
2011 struct wireless_stats stats; 2358 struct wireless_stats stats;
2012 struct rt_link_detect link_info; 2359 struct rt_link_detect link_info;
2013 struct false_alarm_statistics falsealm_cnt; 2360 struct false_alarm_statistics falsealm_cnt;
@@ -2048,6 +2395,20 @@ struct rtl_priv {
2048 bool enter_ps; /* true when entering PS */ 2395 bool enter_ps; /* true when entering PS */
2049 u8 rate_mask[5]; 2396 u8 rate_mask[5];
2050 2397
2398 /* intel Proximity, should be alloc mem
2399 * in intel Proximity module and can only
2400 * be used in intel Proximity mode
2401 */
2402 struct proxim proximity;
2403
2404 /*for bt coexist use*/
2405 struct bt_coexist_info btcoexist;
2406
2407 /* separate 92ee from other ICs,
2408 * 92ee use new trx flow.
2409 */
2410 bool use_new_trx_flow;
2411
2051 /*This must be the last item so 2412 /*This must be the last item so
2052 that it points to the data allocated 2413 that it points to the data allocated
2053 beyond this structure like: 2414 beyond this structure like:
@@ -2079,6 +2440,15 @@ enum bt_co_type {
2079 BT_CSR_BC8 = 4, 2440 BT_CSR_BC8 = 4,
2080 BT_RTL8756 = 5, 2441 BT_RTL8756 = 5,
2081 BT_RTL8723A = 6, 2442 BT_RTL8723A = 6,
2443 BT_RTL8821A = 7,
2444 BT_RTL8723B = 8,
2445 BT_RTL8192E = 9,
2446 BT_RTL8812A = 11,
2447};
2448
2449enum bt_total_ant_num {
2450 ANT_TOTAL_X2 = 0,
2451 ANT_TOTAL_X1 = 1
2082}; 2452};
2083 2453
2084enum bt_cur_state { 2454enum bt_cur_state {
@@ -2104,62 +2474,6 @@ enum bt_radio_shared {
2104 BT_RADIO_INDIVIDUAL = 1, 2474 BT_RADIO_INDIVIDUAL = 1,
2105}; 2475};
2106 2476
2107struct bt_coexist_info {
2108
2109 /* EEPROM BT info. */
2110 u8 eeprom_bt_coexist;
2111 u8 eeprom_bt_type;
2112 u8 eeprom_bt_ant_num;
2113 u8 eeprom_bt_ant_isol;
2114 u8 eeprom_bt_radio_shared;
2115
2116 u8 bt_coexistence;
2117 u8 bt_ant_num;
2118 u8 bt_coexist_type;
2119 u8 bt_state;
2120 u8 bt_cur_state; /* 0:on, 1:off */
2121 u8 bt_ant_isolation; /* 0:good, 1:bad */
2122 u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
2123 u8 bt_service;
2124 u8 bt_radio_shared_type;
2125 u8 bt_rfreg_origin_1e;
2126 u8 bt_rfreg_origin_1f;
2127 u8 bt_rssi_state;
2128 u32 ratio_tx;
2129 u32 ratio_pri;
2130 u32 bt_edca_ul;
2131 u32 bt_edca_dl;
2132
2133 bool init_set;
2134 bool bt_busy_traffic;
2135 bool bt_traffic_mode_set;
2136 bool bt_non_traffic_mode_set;
2137
2138 bool fw_coexist_all_off;
2139 bool sw_coexist_all_off;
2140 bool hw_coexist_all_off;
2141 u32 cstate;
2142 u32 previous_state;
2143 u32 cstate_h;
2144 u32 previous_state_h;
2145
2146 u8 bt_pre_rssi_state;
2147 u8 bt_pre_rssi_state1;
2148
2149 u8 reg_bt_iso;
2150 u8 reg_bt_sco;
2151 bool balance_on;
2152 u8 bt_active_zero_cnt;
2153 bool cur_bt_disabled;
2154 bool pre_bt_disabled;
2155
2156 u8 bt_profile_case;
2157 u8 bt_profile_action;
2158 bool bt_busy;
2159 bool hold_for_bt_operation;
2160 u8 lps_counter;
2161};
2162
2163 2477
2164/**************************************** 2478/****************************************
2165 mem access macro define start 2479 mem access macro define start
diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
index 998e95895f9d..a92bd3e89796 100644
--- a/drivers/net/wireless/ti/wilink_platform_data.c
+++ b/drivers/net/wireless/ti/wilink_platform_data.c
@@ -23,17 +23,17 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/wl12xx.h> 24#include <linux/wl12xx.h>
25 25
26static struct wl12xx_platform_data *platform_data; 26static struct wl12xx_platform_data *wl12xx_platform_data;
27 27
28int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) 28int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
29{ 29{
30 if (platform_data) 30 if (wl12xx_platform_data)
31 return -EBUSY; 31 return -EBUSY;
32 if (!data) 32 if (!data)
33 return -EINVAL; 33 return -EINVAL;
34 34
35 platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL); 35 wl12xx_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
36 if (!platform_data) 36 if (!wl12xx_platform_data)
37 return -ENOMEM; 37 return -ENOMEM;
38 38
39 return 0; 39 return 0;
@@ -41,9 +41,34 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
41 41
42struct wl12xx_platform_data *wl12xx_get_platform_data(void) 42struct wl12xx_platform_data *wl12xx_get_platform_data(void)
43{ 43{
44 if (!platform_data) 44 if (!wl12xx_platform_data)
45 return ERR_PTR(-ENODEV); 45 return ERR_PTR(-ENODEV);
46 46
47 return platform_data; 47 return wl12xx_platform_data;
48} 48}
49EXPORT_SYMBOL(wl12xx_get_platform_data); 49EXPORT_SYMBOL(wl12xx_get_platform_data);
50
51static struct wl1251_platform_data *wl1251_platform_data;
52
53int __init wl1251_set_platform_data(const struct wl1251_platform_data *data)
54{
55 if (wl1251_platform_data)
56 return -EBUSY;
57 if (!data)
58 return -EINVAL;
59
60 wl1251_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
61 if (!wl1251_platform_data)
62 return -ENOMEM;
63
64 return 0;
65}
66
67struct wl1251_platform_data *wl1251_get_platform_data(void)
68{
69 if (!wl1251_platform_data)
70 return ERR_PTR(-ENODEV);
71
72 return wl1251_platform_data;
73}
74EXPORT_SYMBOL(wl1251_get_platform_data);
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 223649bcaa5a..bf1fa18b9786 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -448,7 +448,7 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
448 * Note: This bug may be caused by the fw's DTIM handling. 448 * Note: This bug may be caused by the fw's DTIM handling.
449 */ 449 */
450 if (is_zero_ether_addr(wl->bssid)) 450 if (is_zero_ether_addr(wl->bssid))
451 cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH; 451 cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
452 cmd->params.num_channels = n_channels; 452 cmd->params.num_channels = n_channels;
453 cmd->params.num_probe_requests = n_probes; 453 cmd->params.num_probe_requests = n_probes;
454 cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */ 454 cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index e2b3d9c541e8..b661f896e9fe 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -28,6 +28,7 @@
28#include <linux/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/irq.h> 29#include <linux/irq.h>
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31#include <linux/gpio.h>
31 32
32#include "wl1251.h" 33#include "wl1251.h"
33 34
@@ -182,8 +183,9 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
182 * callback in case it wants to do any additional setup, 183 * callback in case it wants to do any additional setup,
183 * for example enabling clock buffer for the module. 184 * for example enabling clock buffer for the module.
184 */ 185 */
185 if (wl->set_power) 186 if (gpio_is_valid(wl->power_gpio))
186 wl->set_power(true); 187 gpio_set_value(wl->power_gpio, true);
188
187 189
188 ret = pm_runtime_get_sync(&func->dev); 190 ret = pm_runtime_get_sync(&func->dev);
189 if (ret < 0) { 191 if (ret < 0) {
@@ -203,8 +205,8 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
203 if (ret < 0) 205 if (ret < 0)
204 goto out; 206 goto out;
205 207
206 if (wl->set_power) 208 if (gpio_is_valid(wl->power_gpio))
207 wl->set_power(false); 209 gpio_set_value(wl->power_gpio, false);
208 } 210 }
209 211
210out: 212out:
@@ -227,7 +229,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
227 struct wl1251 *wl; 229 struct wl1251 *wl;
228 struct ieee80211_hw *hw; 230 struct ieee80211_hw *hw;
229 struct wl1251_sdio *wl_sdio; 231 struct wl1251_sdio *wl_sdio;
230 const struct wl12xx_platform_data *wl12xx_board_data; 232 const struct wl1251_platform_data *wl1251_board_data;
231 233
232 hw = wl1251_alloc_hw(); 234 hw = wl1251_alloc_hw();
233 if (IS_ERR(hw)) 235 if (IS_ERR(hw))
@@ -254,11 +256,20 @@ static int wl1251_sdio_probe(struct sdio_func *func,
254 wl->if_priv = wl_sdio; 256 wl->if_priv = wl_sdio;
255 wl->if_ops = &wl1251_sdio_ops; 257 wl->if_ops = &wl1251_sdio_ops;
256 258
257 wl12xx_board_data = wl12xx_get_platform_data(); 259 wl1251_board_data = wl1251_get_platform_data();
258 if (!IS_ERR(wl12xx_board_data)) { 260 if (!IS_ERR(wl1251_board_data)) {
259 wl->set_power = wl12xx_board_data->set_power; 261 wl->power_gpio = wl1251_board_data->power_gpio;
260 wl->irq = wl12xx_board_data->irq; 262 wl->irq = wl1251_board_data->irq;
261 wl->use_eeprom = wl12xx_board_data->use_eeprom; 263 wl->use_eeprom = wl1251_board_data->use_eeprom;
264 }
265
266 if (gpio_is_valid(wl->power_gpio)) {
267 ret = devm_gpio_request(&func->dev, wl->power_gpio,
268 "wl1251 power");
269 if (ret) {
270 wl1251_error("Failed to request gpio: %d\n", ret);
271 goto disable;
272 }
262 } 273 }
263 274
264 if (wl->irq) { 275 if (wl->irq) {
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 1342f81e683d..b06d36d99362 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -26,6 +26,10 @@
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/gpio.h>
30#include <linux/of.h>
31#include <linux/of_gpio.h>
32#include <linux/regulator/consumer.h>
29 33
30#include "wl1251.h" 34#include "wl1251.h"
31#include "reg.h" 35#include "reg.h"
@@ -221,8 +225,8 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
221 225
222static int wl1251_spi_set_power(struct wl1251 *wl, bool enable) 226static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
223{ 227{
224 if (wl->set_power) 228 if (gpio_is_valid(wl->power_gpio))
225 wl->set_power(enable); 229 gpio_set_value(wl->power_gpio, enable);
226 230
227 return 0; 231 return 0;
228} 232}
@@ -238,13 +242,13 @@ static const struct wl1251_if_operations wl1251_spi_ops = {
238 242
239static int wl1251_spi_probe(struct spi_device *spi) 243static int wl1251_spi_probe(struct spi_device *spi)
240{ 244{
241 struct wl12xx_platform_data *pdata; 245 struct wl1251_platform_data *pdata = dev_get_platdata(&spi->dev);
246 struct device_node *np = spi->dev.of_node;
242 struct ieee80211_hw *hw; 247 struct ieee80211_hw *hw;
243 struct wl1251 *wl; 248 struct wl1251 *wl;
244 int ret; 249 int ret;
245 250
246 pdata = dev_get_platdata(&spi->dev); 251 if (!np && !pdata) {
247 if (!pdata) {
248 wl1251_error("no platform data"); 252 wl1251_error("no platform data");
249 return -ENODEV; 253 return -ENODEV;
250 } 254 }
@@ -271,22 +275,42 @@ static int wl1251_spi_probe(struct spi_device *spi)
271 goto out_free; 275 goto out_free;
272 } 276 }
273 277
274 wl->set_power = pdata->set_power; 278 if (np) {
275 if (!wl->set_power) { 279 wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
276 wl1251_error("set power function missing in platform data"); 280 wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
277 return -ENODEV; 281 } else if (pdata) {
282 wl->power_gpio = pdata->power_gpio;
283 wl->use_eeprom = pdata->use_eeprom;
284 }
285
286 if (wl->power_gpio == -EPROBE_DEFER) {
287 ret = -EPROBE_DEFER;
288 goto out_free;
289 }
290
291 if (gpio_is_valid(wl->power_gpio)) {
292 ret = devm_gpio_request_one(&spi->dev, wl->power_gpio,
293 GPIOF_OUT_INIT_LOW, "wl1251 power");
294 if (ret) {
295 wl1251_error("Failed to request gpio: %d\n", ret);
296 goto out_free;
297 }
298 } else {
299 wl1251_error("set power gpio missing in platform data");
300 ret = -ENODEV;
301 goto out_free;
278 } 302 }
279 303
280 wl->irq = spi->irq; 304 wl->irq = spi->irq;
281 if (wl->irq < 0) { 305 if (wl->irq < 0) {
282 wl1251_error("irq missing in platform data"); 306 wl1251_error("irq missing in platform data");
283 return -ENODEV; 307 ret = -ENODEV;
308 goto out_free;
284 } 309 }
285 310
286 wl->use_eeprom = pdata->use_eeprom;
287
288 irq_set_status_flags(wl->irq, IRQ_NOAUTOEN); 311 irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
289 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); 312 ret = devm_request_irq(&spi->dev, wl->irq, wl1251_irq, 0,
313 DRIVER_NAME, wl);
290 if (ret < 0) { 314 if (ret < 0) {
291 wl1251_error("request_irq() failed: %d", ret); 315 wl1251_error("request_irq() failed: %d", ret);
292 goto out_free; 316 goto out_free;
@@ -294,16 +318,26 @@ static int wl1251_spi_probe(struct spi_device *spi)
294 318
295 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 319 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
296 320
321 wl->vio = devm_regulator_get(&spi->dev, "vio");
322 if (IS_ERR(wl->vio)) {
323 ret = PTR_ERR(wl->vio);
324 wl1251_error("vio regulator missing: %d", ret);
325 goto out_free;
326 }
327
328 ret = regulator_enable(wl->vio);
329 if (ret)
330 goto out_free;
331
297 ret = wl1251_init_ieee80211(wl); 332 ret = wl1251_init_ieee80211(wl);
298 if (ret) 333 if (ret)
299 goto out_irq; 334 goto disable_regulator;
300 335
301 return 0; 336 return 0;
302 337
303 out_irq: 338disable_regulator:
304 free_irq(wl->irq, wl); 339 regulator_disable(wl->vio);
305 340out_free:
306 out_free:
307 ieee80211_free_hw(hw); 341 ieee80211_free_hw(hw);
308 342
309 return ret; 343 return ret;
@@ -315,6 +349,7 @@ static int wl1251_spi_remove(struct spi_device *spi)
315 349
316 free_irq(wl->irq, wl); 350 free_irq(wl->irq, wl);
317 wl1251_free_hw(wl); 351 wl1251_free_hw(wl);
352 regulator_disable(wl->vio);
318 353
319 return 0; 354 return 0;
320} 355}
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 235617a7716d..16dae5269175 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -276,10 +276,12 @@ struct wl1251 {
276 void *if_priv; 276 void *if_priv;
277 const struct wl1251_if_operations *if_ops; 277 const struct wl1251_if_operations *if_ops;
278 278
279 void (*set_power)(bool enable); 279 int power_gpio;
280 int irq; 280 int irq;
281 bool use_eeprom; 281 bool use_eeprom;
282 282
283 struct regulator *vio;
284
283 spinlock_t wl_lock; 285 spinlock_t wl_lock;
284 286
285 enum wl1251_state state; 287 enum wl1251_state state;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index be7129ba16ad..d50dfac91631 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1378,7 +1378,7 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
1378 1378
1379static int wl12xx_tx_delayed_compl(struct wl1271 *wl) 1379static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
1380{ 1380{
1381 if (wl->fw_status_1->tx_results_counter == 1381 if (wl->fw_status->tx_results_counter ==
1382 (wl->tx_results_count & 0xff)) 1382 (wl->tx_results_count & 0xff))
1383 return 0; 1383 return 0;
1384 1384
@@ -1438,6 +1438,37 @@ out:
1438 return ret; 1438 return ret;
1439} 1439}
1440 1440
1441static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
1442 struct wl_fw_status *fw_status)
1443{
1444 struct wl12xx_fw_status *int_fw_status = raw_fw_status;
1445
1446 fw_status->intr = le32_to_cpu(int_fw_status->intr);
1447 fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
1448 fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
1449 fw_status->tx_results_counter = int_fw_status->tx_results_counter;
1450 fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
1451
1452 fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
1453 fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
1454 fw_status->link_fast_bitmap =
1455 le32_to_cpu(int_fw_status->link_fast_bitmap);
1456 fw_status->total_released_blks =
1457 le32_to_cpu(int_fw_status->total_released_blks);
1458 fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
1459
1460 fw_status->counters.tx_released_pkts =
1461 int_fw_status->counters.tx_released_pkts;
1462 fw_status->counters.tx_lnk_free_pkts =
1463 int_fw_status->counters.tx_lnk_free_pkts;
1464 fw_status->counters.tx_voice_released_blks =
1465 int_fw_status->counters.tx_voice_released_blks;
1466 fw_status->counters.tx_last_rate =
1467 int_fw_status->counters.tx_last_rate;
1468
1469 fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
1470}
1471
1441static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl, 1472static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
1442 struct wl12xx_vif *wlvif) 1473 struct wl12xx_vif *wlvif)
1443{ 1474{
@@ -1677,6 +1708,7 @@ static struct wlcore_ops wl12xx_ops = {
1677 .tx_delayed_compl = wl12xx_tx_delayed_compl, 1708 .tx_delayed_compl = wl12xx_tx_delayed_compl,
1678 .hw_init = wl12xx_hw_init, 1709 .hw_init = wl12xx_hw_init,
1679 .init_vif = NULL, 1710 .init_vif = NULL,
1711 .convert_fw_status = wl12xx_convert_fw_status,
1680 .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask, 1712 .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
1681 .get_pg_ver = wl12xx_get_pg_ver, 1713 .get_pg_ver = wl12xx_get_pg_ver,
1682 .get_mac = wl12xx_get_mac, 1714 .get_mac = wl12xx_get_mac,
@@ -1711,22 +1743,53 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
1711 }, 1743 },
1712}; 1744};
1713 1745
1746static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
1747 {
1748 .max = 3,
1749 .types = BIT(NL80211_IFTYPE_STATION),
1750 },
1751 {
1752 .max = 1,
1753 .types = BIT(NL80211_IFTYPE_AP) |
1754 BIT(NL80211_IFTYPE_P2P_GO) |
1755 BIT(NL80211_IFTYPE_P2P_CLIENT),
1756 },
1757};
1758
1759static const struct ieee80211_iface_combination
1760wl12xx_iface_combinations[] = {
1761 {
1762 .max_interfaces = 3,
1763 .limits = wl12xx_iface_limits,
1764 .n_limits = ARRAY_SIZE(wl12xx_iface_limits),
1765 .num_different_channels = 1,
1766 },
1767};
1768
1714static int wl12xx_setup(struct wl1271 *wl) 1769static int wl12xx_setup(struct wl1271 *wl)
1715{ 1770{
1716 struct wl12xx_priv *priv = wl->priv; 1771 struct wl12xx_priv *priv = wl->priv;
1717 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev); 1772 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
1718 struct wl12xx_platform_data *pdata = pdev_data->pdata; 1773 struct wl12xx_platform_data *pdata = pdev_data->pdata;
1719 1774
1775 BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
1776 BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
1777
1720 wl->rtable = wl12xx_rtable; 1778 wl->rtable = wl12xx_rtable;
1721 wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS; 1779 wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
1722 wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS; 1780 wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
1723 wl->num_channels = 1; 1781 wl->num_links = WL12XX_MAX_LINKS;
1782 wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
1783 wl->iface_combinations = wl12xx_iface_combinations;
1784 wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
1724 wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES; 1785 wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
1725 wl->band_rate_to_idx = wl12xx_band_rate_to_idx; 1786 wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
1726 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; 1787 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
1727 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0; 1788 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
1789 wl->fw_status_len = sizeof(struct wl12xx_fw_status);
1728 wl->fw_status_priv_len = 0; 1790 wl->fw_status_priv_len = 0;
1729 wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics); 1791 wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
1792 wl->ofdm_only_ap = true;
1730 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap); 1793 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
1731 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap); 1794 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
1732 wl12xx_conf_init(wl); 1795 wl12xx_conf_init(wl);
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 9e5484a73667..75c92658bfea 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -65,6 +65,9 @@
65 65
66#define WL12XX_RX_BA_MAX_SESSIONS 3 66#define WL12XX_RX_BA_MAX_SESSIONS 3
67 67
68#define WL12XX_MAX_AP_STATIONS 8
69#define WL12XX_MAX_LINKS 12
70
68struct wl127x_rx_mem_pool_addr { 71struct wl127x_rx_mem_pool_addr {
69 u32 addr; 72 u32 addr;
70 u32 addr_extra; 73 u32 addr_extra;
@@ -79,4 +82,54 @@ struct wl12xx_priv {
79 struct wl127x_rx_mem_pool_addr *rx_mem_addr; 82 struct wl127x_rx_mem_pool_addr *rx_mem_addr;
80}; 83};
81 84
85struct wl12xx_fw_packet_counters {
86 /* Cumulative counter of released packets per AC */
87 u8 tx_released_pkts[NUM_TX_QUEUES];
88
89 /* Cumulative counter of freed packets per HLID */
90 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
91
92 /* Cumulative counter of released Voice memory blocks */
93 u8 tx_voice_released_blks;
94
95 /* Tx rate of the last transmitted packet */
96 u8 tx_last_rate;
97
98 u8 padding[2];
99} __packed;
100
101/* FW status registers */
102struct wl12xx_fw_status {
103 __le32 intr;
104 u8 fw_rx_counter;
105 u8 drv_rx_counter;
106 u8 reserved;
107 u8 tx_results_counter;
108 __le32 rx_pkt_descs[WL12XX_NUM_RX_DESCRIPTORS];
109
110 __le32 fw_localtime;
111
112 /*
113 * A bitmap (where each bit represents a single HLID)
114 * to indicate if the station is in PS mode.
115 */
116 __le32 link_ps_bitmap;
117
118 /*
119 * A bitmap (where each bit represents a single HLID) to indicate
120 * if the station is in Fast mode
121 */
122 __le32 link_fast_bitmap;
123
124 /* Cumulative counter of total released mem blocks since FW-reset */
125 __le32 total_released_blks;
126
127 /* Size (in Memory Blocks) of TX pool */
128 __le32 tx_total;
129
130 struct wl12xx_fw_packet_counters counters;
131
132 __le32 log_start_addr;
133} __packed;
134
82#endif /* __WL12XX_PRIV_H__ */ 135#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ec37b16585df..de5b4fa5d166 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -648,7 +648,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
648}; 648};
649 649
650/* TODO: maybe move to a new header file? */ 650/* TODO: maybe move to a new header file? */
651#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin" 651#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
652 652
653static int wl18xx_identify_chip(struct wl1271 *wl) 653static int wl18xx_identify_chip(struct wl1271 *wl)
654{ 654{
@@ -1133,6 +1133,39 @@ static int wl18xx_hw_init(struct wl1271 *wl)
1133 return ret; 1133 return ret;
1134} 1134}
1135 1135
1136static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
1137 struct wl_fw_status *fw_status)
1138{
1139 struct wl18xx_fw_status *int_fw_status = raw_fw_status;
1140
1141 fw_status->intr = le32_to_cpu(int_fw_status->intr);
1142 fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
1143 fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
1144 fw_status->tx_results_counter = int_fw_status->tx_results_counter;
1145 fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
1146
1147 fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
1148 fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
1149 fw_status->link_fast_bitmap =
1150 le32_to_cpu(int_fw_status->link_fast_bitmap);
1151 fw_status->total_released_blks =
1152 le32_to_cpu(int_fw_status->total_released_blks);
1153 fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
1154
1155 fw_status->counters.tx_released_pkts =
1156 int_fw_status->counters.tx_released_pkts;
1157 fw_status->counters.tx_lnk_free_pkts =
1158 int_fw_status->counters.tx_lnk_free_pkts;
1159 fw_status->counters.tx_voice_released_blks =
1160 int_fw_status->counters.tx_voice_released_blks;
1161 fw_status->counters.tx_last_rate =
1162 int_fw_status->counters.tx_last_rate;
1163
1164 fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
1165
1166 fw_status->priv = &int_fw_status->priv;
1167}
1168
1136static void wl18xx_set_tx_desc_csum(struct wl1271 *wl, 1169static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
1137 struct wl1271_tx_hw_descr *desc, 1170 struct wl1271_tx_hw_descr *desc,
1138 struct sk_buff *skb) 1171 struct sk_buff *skb)
@@ -1572,7 +1605,7 @@ static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
1572{ 1605{
1573 u8 thold; 1606 u8 thold;
1574 struct wl18xx_fw_status_priv *status_priv = 1607 struct wl18xx_fw_status_priv *status_priv =
1575 (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv; 1608 (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
1576 u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap); 1609 u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
1577 1610
1578 /* suspended links are never high priority */ 1611 /* suspended links are never high priority */
@@ -1594,7 +1627,7 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
1594{ 1627{
1595 u8 thold; 1628 u8 thold;
1596 struct wl18xx_fw_status_priv *status_priv = 1629 struct wl18xx_fw_status_priv *status_priv =
1597 (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv; 1630 (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
1598 u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap); 1631 u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
1599 1632
1600 if (test_bit(hlid, (unsigned long *)&suspend_bitmap)) 1633 if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
@@ -1632,6 +1665,7 @@ static struct wlcore_ops wl18xx_ops = {
1632 .tx_immediate_compl = wl18xx_tx_immediate_completion, 1665 .tx_immediate_compl = wl18xx_tx_immediate_completion,
1633 .tx_delayed_compl = NULL, 1666 .tx_delayed_compl = NULL,
1634 .hw_init = wl18xx_hw_init, 1667 .hw_init = wl18xx_hw_init,
1668 .convert_fw_status = wl18xx_convert_fw_status,
1635 .set_tx_desc_csum = wl18xx_set_tx_desc_csum, 1669 .set_tx_desc_csum = wl18xx_set_tx_desc_csum,
1636 .get_pg_ver = wl18xx_get_pg_ver, 1670 .get_pg_ver = wl18xx_get_pg_ver,
1637 .set_rx_csum = wl18xx_set_rx_csum, 1671 .set_rx_csum = wl18xx_set_rx_csum,
@@ -1713,19 +1747,62 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
1713 }, 1747 },
1714}; 1748};
1715 1749
1750static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
1751 {
1752 .max = 3,
1753 .types = BIT(NL80211_IFTYPE_STATION),
1754 },
1755 {
1756 .max = 1,
1757 .types = BIT(NL80211_IFTYPE_AP) |
1758 BIT(NL80211_IFTYPE_P2P_GO) |
1759 BIT(NL80211_IFTYPE_P2P_CLIENT),
1760 },
1761};
1762
1763static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
1764 {
1765 .max = 2,
1766 .types = BIT(NL80211_IFTYPE_AP),
1767 },
1768};
1769
1770static const struct ieee80211_iface_combination
1771wl18xx_iface_combinations[] = {
1772 {
1773 .max_interfaces = 3,
1774 .limits = wl18xx_iface_limits,
1775 .n_limits = ARRAY_SIZE(wl18xx_iface_limits),
1776 .num_different_channels = 2,
1777 },
1778 {
1779 .max_interfaces = 2,
1780 .limits = wl18xx_iface_ap_limits,
1781 .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
1782 .num_different_channels = 1,
1783 }
1784};
1785
1716static int wl18xx_setup(struct wl1271 *wl) 1786static int wl18xx_setup(struct wl1271 *wl)
1717{ 1787{
1718 struct wl18xx_priv *priv = wl->priv; 1788 struct wl18xx_priv *priv = wl->priv;
1719 int ret; 1789 int ret;
1720 1790
1791 BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
1792 BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
1793
1721 wl->rtable = wl18xx_rtable; 1794 wl->rtable = wl18xx_rtable;
1722 wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS; 1795 wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
1723 wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS; 1796 wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
1724 wl->num_channels = 2; 1797 wl->num_links = WL18XX_MAX_LINKS;
1798 wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
1799 wl->iface_combinations = wl18xx_iface_combinations;
1800 wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
1725 wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES; 1801 wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
1726 wl->band_rate_to_idx = wl18xx_band_rate_to_idx; 1802 wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
1727 wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX; 1803 wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
1728 wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0; 1804 wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
1805 wl->fw_status_len = sizeof(struct wl18xx_fw_status);
1729 wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv); 1806 wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
1730 wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics); 1807 wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
1731 wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv); 1808 wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 57c694396647..be1ebd55ac88 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -32,7 +32,7 @@ static
32void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif, 32void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
33 struct ieee80211_tx_rate *rate) 33 struct ieee80211_tx_rate *rate)
34{ 34{
35 u8 fw_rate = wl->fw_status_2->counters.tx_last_rate; 35 u8 fw_rate = wl->fw_status->counters.tx_last_rate;
36 36
37 if (fw_rate > CONF_HW_RATE_INDEX_MAX) { 37 if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
38 wl1271_error("last Tx rate invalid: %d", fw_rate); 38 wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -139,7 +139,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
139void wl18xx_tx_immediate_complete(struct wl1271 *wl) 139void wl18xx_tx_immediate_complete(struct wl1271 *wl)
140{ 140{
141 struct wl18xx_fw_status_priv *status_priv = 141 struct wl18xx_fw_status_priv *status_priv =
142 (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv; 142 (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
143 struct wl18xx_priv *priv = wl->priv; 143 struct wl18xx_priv *priv = wl->priv;
144 u8 i; 144 u8 i;
145 145
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 9204e07ee432..eb7cfe817010 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
26 26
27/* minimum FW required for driver */ 27/* minimum FW required for driver */
28#define WL18XX_CHIP_VER 8 28#define WL18XX_CHIP_VER 8
29#define WL18XX_IFTYPE_VER 5 29#define WL18XX_IFTYPE_VER 8
30#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE 30#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
31#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE 31#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
32#define WL18XX_MINOR_VER 39 32#define WL18XX_MINOR_VER 13
33 33
34#define WL18XX_CMD_MAX_SIZE 740 34#define WL18XX_CMD_MAX_SIZE 740
35 35
@@ -40,7 +40,10 @@
40 40
41#define WL18XX_NUM_MAC_ADDRESSES 3 41#define WL18XX_NUM_MAC_ADDRESSES 3
42 42
43#define WL18XX_RX_BA_MAX_SESSIONS 5 43#define WL18XX_RX_BA_MAX_SESSIONS 13
44
45#define WL18XX_MAX_AP_STATIONS 10
46#define WL18XX_MAX_LINKS 16
44 47
45struct wl18xx_priv { 48struct wl18xx_priv {
46 /* buffer for sending commands to FW */ 49 /* buffer for sending commands to FW */
@@ -109,6 +112,59 @@ struct wl18xx_fw_status_priv {
109 u8 padding[3]; 112 u8 padding[3];
110}; 113};
111 114
115struct wl18xx_fw_packet_counters {
116 /* Cumulative counter of released packets per AC */
117 u8 tx_released_pkts[NUM_TX_QUEUES];
118
119 /* Cumulative counter of freed packets per HLID */
120 u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
121
122 /* Cumulative counter of released Voice memory blocks */
123 u8 tx_voice_released_blks;
124
125 /* Tx rate of the last transmitted packet */
126 u8 tx_last_rate;
127
128 u8 padding[2];
129} __packed;
130
131/* FW status registers */
132struct wl18xx_fw_status {
133 __le32 intr;
134 u8 fw_rx_counter;
135 u8 drv_rx_counter;
136 u8 reserved;
137 u8 tx_results_counter;
138 __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
139
140 __le32 fw_localtime;
141
142 /*
143 * A bitmap (where each bit represents a single HLID)
144 * to indicate if the station is in PS mode.
145 */
146 __le32 link_ps_bitmap;
147
148 /*
149 * A bitmap (where each bit represents a single HLID) to indicate
150 * if the station is in Fast mode
151 */
152 __le32 link_fast_bitmap;
153
154 /* Cumulative counter of total released mem blocks since FW-reset */
155 __le32 total_released_blks;
156
157 /* Size (in Memory Blocks) of TX pool */
158 __le32 tx_total;
159
160 struct wl18xx_fw_packet_counters counters;
161
162 __le32 log_start_addr;
163
164 /* Private status to be used by the lower drivers */
165 struct wl18xx_fw_status_priv priv;
166} __packed;
167
112#define WL18XX_PHY_VERSION_MAX_LEN 20 168#define WL18XX_PHY_VERSION_MAX_LEN 20
113 169
114struct wl18xx_static_data_priv { 170struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ec83675a2446..b924ceadc02c 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -358,7 +358,8 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
358 struct acx_beacon_filter_option *beacon_filter = NULL; 358 struct acx_beacon_filter_option *beacon_filter = NULL;
359 int ret = 0; 359 int ret = 0;
360 360
361 wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); 361 wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
362 enable_filter);
362 363
363 if (enable_filter && 364 if (enable_filter &&
364 wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED) 365 wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
@@ -1591,7 +1592,8 @@ out:
1591 return ret; 1592 return ret;
1592} 1593}
1593 1594
1594int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr) 1595int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
1596 struct wl12xx_vif *wlvif, u8 *addr)
1595{ 1597{
1596 struct wl1271_acx_inconnection_sta *acx = NULL; 1598 struct wl1271_acx_inconnection_sta *acx = NULL;
1597 int ret; 1599 int ret;
@@ -1603,6 +1605,7 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
1603 return -ENOMEM; 1605 return -ENOMEM;
1604 1606
1605 memcpy(acx->addr, addr, ETH_ALEN); 1607 memcpy(acx->addr, addr, ETH_ALEN);
1608 acx->role_id = wlvif->role_id;
1606 1609
1607 ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST, 1610 ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
1608 acx, sizeof(*acx)); 1611 acx, sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6dcfad9b0472..954d57ec98f4 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -824,7 +824,8 @@ struct wl1271_acx_inconnection_sta {
824 struct acx_header header; 824 struct acx_header header;
825 825
826 u8 addr[ETH_ALEN]; 826 u8 addr[ETH_ALEN];
827 u8 padding1[2]; 827 u8 role_id;
828 u8 padding;
828} __packed; 829} __packed;
829 830
830/* 831/*
@@ -1118,7 +1119,8 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1118 bool enable); 1119 bool enable);
1119int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif); 1120int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1120int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif); 1121int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1121int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); 1122int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
1123 struct wl12xx_vif *wlvif, u8 *addr);
1122int wl1271_acx_fm_coex(struct wl1271 *wl); 1124int wl1271_acx_fm_coex(struct wl1271 *wl);
1123int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); 1125int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
1124int wl12xx_acx_config_hangover(struct wl1271 *wl); 1126int wl12xx_acx_config_hangover(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 9b2ecf52449f..40dc30f4faaa 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,8 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
60 u16 status; 60 u16 status;
61 u16 poll_count = 0; 61 u16 poll_count = 0;
62 62
63 if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING && 63 if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
64 id != CMD_STOP_FWLOGGER)) 64 id != CMD_STOP_FWLOGGER))
65 return -EIO; 65 return -EIO;
66 66
67 cmd = buf; 67 cmd = buf;
@@ -312,8 +312,8 @@ static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
312int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) 312int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
313{ 313{
314 unsigned long flags; 314 unsigned long flags;
315 u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS); 315 u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
316 if (link >= WL12XX_MAX_LINKS) 316 if (link >= wl->num_links)
317 return -EBUSY; 317 return -EBUSY;
318 318
319 wl->session_ids[link] = wlcore_get_new_session_id(wl, link); 319 wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
@@ -324,9 +324,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
324 __set_bit(link, wlvif->links_map); 324 __set_bit(link, wlvif->links_map);
325 spin_unlock_irqrestore(&wl->wl_lock, flags); 325 spin_unlock_irqrestore(&wl->wl_lock, flags);
326 326
327 /* take the last "freed packets" value from the current FW status */ 327 /*
328 wl->links[link].prev_freed_pkts = 328 * take the last "freed packets" value from the current FW status.
329 wl->fw_status_2->counters.tx_lnk_free_pkts[link]; 329 * on recovery, we might not have fw_status yet, and
330 * tx_lnk_free_pkts will be NULL. check for it.
331 */
332 if (wl->fw_status->counters.tx_lnk_free_pkts)
333 wl->links[link].prev_freed_pkts =
334 wl->fw_status->counters.tx_lnk_free_pkts[link];
330 wl->links[link].wlvif = wlvif; 335 wl->links[link].wlvif = wlvif;
331 336
332 /* 337 /*
@@ -1527,6 +1532,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1527 cmd->sp_len = sta->max_sp; 1532 cmd->sp_len = sta->max_sp;
1528 cmd->wmm = sta->wme ? 1 : 0; 1533 cmd->wmm = sta->wme ? 1 : 0;
1529 cmd->session_id = wl->session_ids[hlid]; 1534 cmd->session_id = wl->session_ids[hlid];
1535 cmd->role_id = wlvif->role_id;
1530 1536
1531 for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++) 1537 for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
1532 if (sta->wme && (sta->uapsd_queues & BIT(i))) 1538 if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1563,7 +1569,8 @@ out:
1563 return ret; 1569 return ret;
1564} 1570}
1565 1571
1566int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid) 1572int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1573 u8 hlid)
1567{ 1574{
1568 struct wl12xx_cmd_remove_peer *cmd; 1575 struct wl12xx_cmd_remove_peer *cmd;
1569 int ret; 1576 int ret;
@@ -1581,6 +1588,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
1581 /* We never send a deauth, mac80211 is in charge of this */ 1588 /* We never send a deauth, mac80211 is in charge of this */
1582 cmd->reason_opcode = 0; 1589 cmd->reason_opcode = 0;
1583 cmd->send_deauth_flag = 0; 1590 cmd->send_deauth_flag = 0;
1591 cmd->role_id = wlvif->role_id;
1584 1592
1585 ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0); 1593 ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
1586 if (ret < 0) { 1594 if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 323d4a856e4b..b084830a61cf 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -88,7 +88,8 @@ int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
88int wl12xx_croc(struct wl1271 *wl, u8 role_id); 88int wl12xx_croc(struct wl1271 *wl, u8 role_id);
89int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, 89int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
90 struct ieee80211_sta *sta, u8 hlid); 90 struct ieee80211_sta *sta, u8 hlid);
91int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid); 91int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
92 u8 hlid);
92void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, 93void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
93 enum ieee80211_band band); 94 enum ieee80211_band band);
94int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl); 95int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
@@ -206,7 +207,7 @@ enum cmd_templ {
206#define WL1271_COMMAND_TIMEOUT 2000 207#define WL1271_COMMAND_TIMEOUT 2000
207#define WL1271_CMD_TEMPL_DFLT_SIZE 252 208#define WL1271_CMD_TEMPL_DFLT_SIZE 252
208#define WL1271_CMD_TEMPL_MAX_SIZE 512 209#define WL1271_CMD_TEMPL_MAX_SIZE 512
209#define WL1271_EVENT_TIMEOUT 1500 210#define WL1271_EVENT_TIMEOUT 5000
210 211
211struct wl1271_cmd_header { 212struct wl1271_cmd_header {
212 __le16 id; 213 __le16 id;
@@ -594,6 +595,8 @@ struct wl12xx_cmd_add_peer {
594 u8 sp_len; 595 u8 sp_len;
595 u8 wmm; 596 u8 wmm;
596 u8 session_id; 597 u8 session_id;
598 u8 role_id;
599 u8 padding[3];
597} __packed; 600} __packed;
598 601
599struct wl12xx_cmd_remove_peer { 602struct wl12xx_cmd_remove_peer {
@@ -602,7 +605,7 @@ struct wl12xx_cmd_remove_peer {
602 u8 hlid; 605 u8 hlid;
603 u8 reason_opcode; 606 u8 reason_opcode;
604 u8 send_deauth_flag; 607 u8 send_deauth_flag;
605 u8 padding1; 608 u8 role_id;
606} __packed; 609} __packed;
607 610
608/* 611/*
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 8d3b34965db3..1f9a36031b06 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -67,7 +67,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
67 u8 hlid; 67 u8 hlid;
68 struct wl1271_link *lnk; 68 struct wl1271_link *lnk;
69 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, 69 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
70 WL12XX_MAX_LINKS) { 70 wl->num_links) {
71 lnk = &wl->links[hlid]; 71 lnk = &wl->links[hlid];
72 if (!lnk->ba_bitmap) 72 if (!lnk->ba_bitmap)
73 continue; 73 continue;
@@ -172,7 +172,7 @@ static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
172 const u8 *addr; 172 const u8 *addr;
173 int h; 173 int h;
174 174
175 for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) { 175 for_each_set_bit(h, &sta_bitmap, wl->num_links) {
176 bool found = false; 176 bool found = false;
177 /* find the ap vif connected to this sta */ 177 /* find the ap vif connected to this sta */
178 wl12xx_for_each_wlvif_ap(wl, wlvif) { 178 wl12xx_for_each_wlvif_ap(wl, wlvif) {
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 51f8d634d32f..1555ff970050 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -106,6 +106,15 @@ wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
106 return 0; 106 return 0;
107} 107}
108 108
109static inline void
110wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
111 struct wl_fw_status *fw_status)
112{
113 BUG_ON(!wl->ops->convert_fw_status);
114
115 wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
116}
117
109static inline u32 118static inline u32
110wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif) 119wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
111{ 120{
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 7699f9d07e26..199e94120864 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -287,8 +287,8 @@ static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
287 if (ret < 0) 287 if (ret < 0)
288 return ret; 288 return ret;
289 289
290 /* enable beacon filtering */ 290 /* disable beacon filtering until we get the first beacon */
291 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 291 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
292 if (ret < 0) 292 if (ret < 0)
293 return ret; 293 return ret;
294 294
@@ -462,7 +462,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
462 * If the basic rates contain OFDM rates, use OFDM only 462 * If the basic rates contain OFDM rates, use OFDM only
463 * rates for unicast TX as well. Else use all supported rates. 463 * rates for unicast TX as well. Else use all supported rates.
464 */ 464 */
465 if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES)) 465 if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
466 supported_rates = CONF_TX_OFDM_RATES; 466 supported_rates = CONF_TX_OFDM_RATES;
467 else 467 else
468 supported_rates = CONF_TX_ENABLED_RATES; 468 supported_rates = CONF_TX_ENABLED_RATES;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 07e3d6a049ad..0305729d0986 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -60,7 +60,9 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
60{ 60{
61 int ret; 61 int ret;
62 62
63 if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags)) 63 if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
64 WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
65 addr != HW_ACCESS_ELP_CTRL_REG)))
64 return -EIO; 66 return -EIO;
65 67
66 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed); 68 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
@@ -76,7 +78,9 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
76{ 78{
77 int ret; 79 int ret;
78 80
79 if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags)) 81 if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
82 WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
83 addr != HW_ACCESS_ELP_CTRL_REG)))
80 return -EIO; 84 return -EIO;
81 85
82 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed); 86 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b46b3116cc55..ed88d3913483 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -345,24 +345,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
345 * Start high-level PS if the STA is asleep with enough blocks in FW. 345 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 * Make an exception if this is the only connected link. In this 346 * Make an exception if this is the only connected link. In this
347 * case FW-memory congestion is less of a problem. 347 * case FW-memory congestion is less of a problem.
348 * Note that a single connected STA means 3 active links, since we must 348 * Note that a single connected STA means 2*ap_count + 1 active links,
349 * account for the global and broadcast AP links. The "fw_ps" check 349 * since we must account for the global and broadcast AP links
350 * assures us the third link is a STA connected to the AP. Otherwise 350 * for each AP. The "fw_ps" check assures us the other link is a STA
351 * the FW would not set the PSM bit. 351 * connected to the AP. Otherwise the FW would not set the PSM bit.
352 */ 352 */
353 else if (wl->active_link_count > 3 && fw_ps && 353 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
354 tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 354 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true); 355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
356} 356}
357 357
358static void wl12xx_irq_update_links_status(struct wl1271 *wl, 358static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif, 359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status_2 *status) 360 struct wl_fw_status *status)
361{ 361{
362 u32 cur_fw_ps_map; 362 u32 cur_fw_ps_map;
363 u8 hlid; 363 u8 hlid;
364 364
365 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap); 365 cur_fw_ps_map = status->link_ps_bitmap;
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) { 366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM, 367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x", 368 "link ps prev 0x%x cur 0x%x changed 0x%x",
@@ -372,77 +372,73 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
372 wl->ap_fw_ps_map = cur_fw_ps_map; 372 wl->ap_fw_ps_map = cur_fw_ps_map;
373 } 373 }
374 374
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) 375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, 376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts); 377 wl->links[hlid].allocated_pkts);
378} 378}
379 379
380static int wlcore_fw_status(struct wl1271 *wl, 380static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
381 struct wl_fw_status_1 *status_1,
382 struct wl_fw_status_2 *status_2)
383{ 381{
384 struct wl12xx_vif *wlvif; 382 struct wl12xx_vif *wlvif;
385 struct timespec ts; 383 struct timespec ts;
386 u32 old_tx_blk_count = wl->tx_blocks_available; 384 u32 old_tx_blk_count = wl->tx_blocks_available;
387 int avail, freed_blocks; 385 int avail, freed_blocks;
388 int i; 386 int i;
389 size_t status_len;
390 int ret; 387 int ret;
391 struct wl1271_link *lnk; 388 struct wl1271_link *lnk;
392 389
393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + 390 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
394 sizeof(*status_2) + wl->fw_status_priv_len; 391 wl->raw_fw_status,
395 392 wl->fw_status_len, false);
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
397 status_len, false);
398 if (ret < 0) 393 if (ret < 0)
399 return ret; 394 return ret;
400 395
396 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
397
401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 398 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)", 399 "drv_rx_counter = %d, tx_results_counter = %d)",
403 status_1->intr, 400 status->intr,
404 status_1->fw_rx_counter, 401 status->fw_rx_counter,
405 status_1->drv_rx_counter, 402 status->drv_rx_counter,
406 status_1->tx_results_counter); 403 status->tx_results_counter);
407 404
408 for (i = 0; i < NUM_TX_QUEUES; i++) { 405 for (i = 0; i < NUM_TX_QUEUES; i++) {
409 /* prevent wrap-around in freed-packets counter */ 406 /* prevent wrap-around in freed-packets counter */
410 wl->tx_allocated_pkts[i] -= 407 wl->tx_allocated_pkts[i] -=
411 (status_2->counters.tx_released_pkts[i] - 408 (status->counters.tx_released_pkts[i] -
412 wl->tx_pkts_freed[i]) & 0xff; 409 wl->tx_pkts_freed[i]) & 0xff;
413 410
414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i]; 411 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
415 } 412 }
416 413
417 414
418 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) { 415 for_each_set_bit(i, wl->links_map, wl->num_links) {
419 u8 diff; 416 u8 diff;
420 lnk = &wl->links[i]; 417 lnk = &wl->links[i];
421 418
422 /* prevent wrap-around in freed-packets counter */ 419 /* prevent wrap-around in freed-packets counter */
423 diff = (status_2->counters.tx_lnk_free_pkts[i] - 420 diff = (status->counters.tx_lnk_free_pkts[i] -
424 lnk->prev_freed_pkts) & 0xff; 421 lnk->prev_freed_pkts) & 0xff;
425 422
426 if (diff == 0) 423 if (diff == 0)
427 continue; 424 continue;
428 425
429 lnk->allocated_pkts -= diff; 426 lnk->allocated_pkts -= diff;
430 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i]; 427 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
431 428
432 /* accumulate the prev_freed_pkts counter */ 429 /* accumulate the prev_freed_pkts counter */
433 lnk->total_freed_pkts += diff; 430 lnk->total_freed_pkts += diff;
434 } 431 }
435 432
436 /* prevent wrap-around in total blocks counter */ 433 /* prevent wrap-around in total blocks counter */
437 if (likely(wl->tx_blocks_freed <= 434 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
438 le32_to_cpu(status_2->total_released_blks))) 435 freed_blocks = status->total_released_blks -
439 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
440 wl->tx_blocks_freed; 436 wl->tx_blocks_freed;
441 else 437 else
442 freed_blocks = 0x100000000LL - wl->tx_blocks_freed + 438 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
443 le32_to_cpu(status_2->total_released_blks); 439 status->total_released_blks;
444 440
445 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks); 441 wl->tx_blocks_freed = status->total_released_blks;
446 442
447 wl->tx_allocated_blocks -= freed_blocks; 443 wl->tx_allocated_blocks -= freed_blocks;
448 444
@@ -458,7 +454,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
458 cancel_delayed_work(&wl->tx_watchdog_work); 454 cancel_delayed_work(&wl->tx_watchdog_work);
459 } 455 }
460 456
461 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks; 457 avail = status->tx_total - wl->tx_allocated_blocks;
462 458
463 /* 459 /*
464 * The FW might change the total number of TX memblocks before 460 * The FW might change the total number of TX memblocks before
@@ -477,15 +473,15 @@ static int wlcore_fw_status(struct wl1271 *wl,
477 473
478 /* for AP update num of allocated TX blocks per link and ps status */ 474 /* for AP update num of allocated TX blocks per link and ps status */
479 wl12xx_for_each_wlvif_ap(wl, wlvif) { 475 wl12xx_for_each_wlvif_ap(wl, wlvif) {
480 wl12xx_irq_update_links_status(wl, wlvif, status_2); 476 wl12xx_irq_update_links_status(wl, wlvif, status);
481 } 477 }
482 478
483 /* update the host-chipset time offset */ 479 /* update the host-chipset time offset */
484 getnstimeofday(&ts); 480 getnstimeofday(&ts);
485 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 481 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
486 (s64)le32_to_cpu(status_2->fw_localtime); 482 (s64)(status->fw_localtime);
487 483
488 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap); 484 wl->fw_fast_lnk_map = status->link_fast_bitmap;
489 485
490 return 0; 486 return 0;
491} 487}
@@ -549,13 +545,13 @@ static int wlcore_irq_locked(struct wl1271 *wl)
549 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 545 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
550 smp_mb__after_clear_bit(); 546 smp_mb__after_clear_bit();
551 547
552 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2); 548 ret = wlcore_fw_status(wl, wl->fw_status);
553 if (ret < 0) 549 if (ret < 0)
554 goto out; 550 goto out;
555 551
556 wlcore_hw_tx_immediate_compl(wl); 552 wlcore_hw_tx_immediate_compl(wl);
557 553
558 intr = le32_to_cpu(wl->fw_status_1->intr); 554 intr = wl->fw_status->intr;
559 intr &= WLCORE_ALL_INTR_MASK; 555 intr &= WLCORE_ALL_INTR_MASK;
560 if (!intr) { 556 if (!intr) {
561 done = true; 557 done = true;
@@ -584,7 +580,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
584 if (likely(intr & WL1271_ACX_INTR_DATA)) { 580 if (likely(intr & WL1271_ACX_INTR_DATA)) {
585 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 581 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
586 582
587 ret = wlcore_rx(wl, wl->fw_status_1); 583 ret = wlcore_rx(wl, wl->fw_status);
588 if (ret < 0) 584 if (ret < 0)
589 goto out; 585 goto out;
590 586
@@ -786,10 +782,11 @@ out:
786 782
787void wl12xx_queue_recovery_work(struct wl1271 *wl) 783void wl12xx_queue_recovery_work(struct wl1271 *wl)
788{ 784{
789 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
790
791 /* Avoid a recursive recovery */ 785 /* Avoid a recursive recovery */
792 if (wl->state == WLCORE_STATE_ON) { 786 if (wl->state == WLCORE_STATE_ON) {
787 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
788 &wl->flags));
789
793 wl->state = WLCORE_STATE_RESTARTING; 790 wl->state = WLCORE_STATE_RESTARTING;
794 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 791 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
795 wl1271_ps_elp_wakeup(wl); 792 wl1271_ps_elp_wakeup(wl);
@@ -803,7 +800,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
803 size_t len; 800 size_t len;
804 801
805 /* Make sure we have enough room */ 802 /* Make sure we have enough room */
806 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size)); 803 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
807 804
808 /* Fill the FW log file, consumed by the sysfs fwlog entry */ 805 /* Fill the FW log file, consumed by the sysfs fwlog entry */
809 memcpy(wl->fwlog + wl->fwlog_size, memblock, len); 806 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -843,11 +840,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
843 wl12xx_cmd_stop_fwlog(wl); 840 wl12xx_cmd_stop_fwlog(wl);
844 841
845 /* Read the first memory block address */ 842 /* Read the first memory block address */
846 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2); 843 ret = wlcore_fw_status(wl, wl->fw_status);
847 if (ret < 0) 844 if (ret < 0)
848 goto out; 845 goto out;
849 846
850 addr = le32_to_cpu(wl->fw_status_2->log_start_addr); 847 addr = wl->fw_status->log_start_addr;
851 if (!addr) 848 if (!addr)
852 goto out; 849 goto out;
853 850
@@ -990,23 +987,23 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
990 987
991static int wl1271_setup(struct wl1271 *wl) 988static int wl1271_setup(struct wl1271 *wl)
992{ 989{
993 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + 990 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
994 sizeof(*wl->fw_status_2) + 991 if (!wl->raw_fw_status)
995 wl->fw_status_priv_len, GFP_KERNEL); 992 goto err;
996 if (!wl->fw_status_1)
997 return -ENOMEM;
998 993
999 wl->fw_status_2 = (struct wl_fw_status_2 *) 994 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1000 (((u8 *) wl->fw_status_1) + 995 if (!wl->fw_status)
1001 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc)); 996 goto err;
1002 997
1003 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); 998 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1004 if (!wl->tx_res_if) { 999 if (!wl->tx_res_if)
1005 kfree(wl->fw_status_1); 1000 goto err;
1006 return -ENOMEM;
1007 }
1008 1001
1009 return 0; 1002 return 0;
1003err:
1004 kfree(wl->fw_status);
1005 kfree(wl->raw_fw_status);
1006 return -ENOMEM;
1010} 1007}
1011 1008
1012static int wl12xx_set_power_on(struct wl1271 *wl) 1009static int wl12xx_set_power_on(struct wl1271 *wl)
@@ -1767,6 +1764,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1767 flush_work(&wl->tx_work); 1764 flush_work(&wl->tx_work);
1768 flush_delayed_work(&wl->elp_work); 1765 flush_delayed_work(&wl->elp_work);
1769 1766
1767 /*
1768 * Cancel the watchdog even if above tx_flush failed. We will detect
1769 * it on resume anyway.
1770 */
1771 cancel_delayed_work(&wl->tx_watchdog_work);
1772
1770 return 0; 1773 return 0;
1771} 1774}
1772 1775
@@ -1824,6 +1827,13 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1824 1827
1825out: 1828out:
1826 wl->wow_enabled = false; 1829 wl->wow_enabled = false;
1830
1831 /*
1832 * Set a flag to re-init the watchdog on the first Tx after resume.
1833 * That way we avoid possible conditions where Tx-complete interrupts
1834 * fail to arrive and we perform a spurious recovery.
1835 */
1836 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1827 mutex_unlock(&wl->mutex); 1837 mutex_unlock(&wl->mutex);
1828 1838
1829 return 0; 1839 return 0;
@@ -1914,6 +1924,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1914 memset(wl->links_map, 0, sizeof(wl->links_map)); 1924 memset(wl->links_map, 0, sizeof(wl->links_map));
1915 memset(wl->roc_map, 0, sizeof(wl->roc_map)); 1925 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1916 memset(wl->session_ids, 0, sizeof(wl->session_ids)); 1926 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1927 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1917 wl->active_sta_count = 0; 1928 wl->active_sta_count = 0;
1918 wl->active_link_count = 0; 1929 wl->active_link_count = 0;
1919 1930
@@ -1938,9 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1938 1949
1939 wl1271_debugfs_reset(wl); 1950 wl1271_debugfs_reset(wl);
1940 1951
1941 kfree(wl->fw_status_1); 1952 kfree(wl->raw_fw_status);
1942 wl->fw_status_1 = NULL; 1953 wl->raw_fw_status = NULL;
1943 wl->fw_status_2 = NULL; 1954 kfree(wl->fw_status);
1955 wl->fw_status = NULL;
1944 kfree(wl->tx_res_if); 1956 kfree(wl->tx_res_if);
1945 wl->tx_res_if = NULL; 1957 wl->tx_res_if = NULL;
1946 kfree(wl->target_mem_map); 1958 kfree(wl->target_mem_map);
@@ -2571,10 +2583,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2571 ieee80211_scan_completed(wl->hw, true); 2583 ieee80211_scan_completed(wl->hw, true);
2572 } 2584 }
2573 2585
2574 if (wl->sched_vif == wlvif) { 2586 if (wl->sched_vif == wlvif)
2575 ieee80211_sched_scan_stopped(wl->hw);
2576 wl->sched_vif = NULL; 2587 wl->sched_vif = NULL;
2577 }
2578 2588
2579 if (wl->roc_vif == vif) { 2589 if (wl->roc_vif == vif) {
2580 wl->roc_vif = NULL; 2590 wl->roc_vif = NULL;
@@ -2931,6 +2941,11 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2931 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); 2941 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2932 if (ret < 0) 2942 if (ret < 0)
2933 return ret; 2943 return ret;
2944
2945 /* disable beacon filtering */
2946 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
2947 if (ret < 0)
2948 return ret;
2934 } 2949 }
2935 2950
2936 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { 2951 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
@@ -3463,6 +3478,10 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3463 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d", 3478 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3464 key_idx); 3479 key_idx);
3465 3480
3481 /* we don't handle unsetting of default key */
3482 if (key_idx == -1)
3483 return;
3484
3466 mutex_lock(&wl->mutex); 3485 mutex_lock(&wl->mutex);
3467 3486
3468 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3487 if (unlikely(wl->state != WLCORE_STATE_ON)) {
@@ -3649,8 +3668,8 @@ out:
3649 return ret; 3668 return ret;
3650} 3669}
3651 3670
3652static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, 3671static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3653 struct ieee80211_vif *vif) 3672 struct ieee80211_vif *vif)
3654{ 3673{
3655 struct wl1271 *wl = hw->priv; 3674 struct wl1271 *wl = hw->priv;
3656 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3675 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
@@ -3672,6 +3691,8 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3672 wl1271_ps_elp_sleep(wl); 3691 wl1271_ps_elp_sleep(wl);
3673out: 3692out:
3674 mutex_unlock(&wl->mutex); 3693 mutex_unlock(&wl->mutex);
3694
3695 return 0;
3675} 3696}
3676 3697
3677static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 3698static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -4298,6 +4319,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4298 } 4319 }
4299 } 4320 }
4300 4321
4322 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4323 /* enable beacon filtering */
4324 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4325 if (ret < 0)
4326 goto out;
4327 }
4328
4301 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); 4329 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4302 if (ret < 0) 4330 if (ret < 0)
4303 goto out; 4331 goto out;
@@ -4651,7 +4679,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
4651 int ret; 4679 int ret;
4652 4680
4653 4681
4654 if (wl->active_sta_count >= AP_MAX_STATIONS) { 4682 if (wl->active_sta_count >= wl->max_ap_stations) {
4655 wl1271_warning("could not allocate HLID - too much stations"); 4683 wl1271_warning("could not allocate HLID - too much stations");
4656 return -EBUSY; 4684 return -EBUSY;
4657 } 4685 }
@@ -4754,7 +4782,7 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
4754 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map))) 4782 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4755 return -EINVAL; 4783 return -EINVAL;
4756 4784
4757 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid); 4785 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4758 if (ret < 0) 4786 if (ret < 0)
4759 return ret; 4787 return ret;
4760 4788
@@ -5679,28 +5707,6 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
5679 5707
5680} 5708}
5681 5709
5682static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5683 {
5684 .max = 3,
5685 .types = BIT(NL80211_IFTYPE_STATION),
5686 },
5687 {
5688 .max = 1,
5689 .types = BIT(NL80211_IFTYPE_AP) |
5690 BIT(NL80211_IFTYPE_P2P_GO) |
5691 BIT(NL80211_IFTYPE_P2P_CLIENT),
5692 },
5693};
5694
5695static struct ieee80211_iface_combination
5696wlcore_iface_combinations[] = {
5697 {
5698 .max_interfaces = 3,
5699 .limits = wlcore_iface_limits,
5700 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5701 },
5702};
5703
5704static int wl1271_init_ieee80211(struct wl1271 *wl) 5710static int wl1271_init_ieee80211(struct wl1271 *wl)
5705{ 5711{
5706 int i; 5712 int i;
@@ -5733,7 +5739,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5733 IEEE80211_HW_AP_LINK_PS | 5739 IEEE80211_HW_AP_LINK_PS |
5734 IEEE80211_HW_AMPDU_AGGREGATION | 5740 IEEE80211_HW_AMPDU_AGGREGATION |
5735 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | 5741 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5736 IEEE80211_HW_QUEUE_CONTROL; 5742 IEEE80211_HW_QUEUE_CONTROL |
5743 IEEE80211_HW_CHANCTX_STA_CSA;
5737 5744
5738 wl->hw->wiphy->cipher_suites = cipher_suites; 5745 wl->hw->wiphy->cipher_suites = cipher_suites;
5739 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 5746 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5821,10 +5828,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5821 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 5828 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5822 5829
5823 /* allowed interface combinations */ 5830 /* allowed interface combinations */
5824 wlcore_iface_combinations[0].num_different_channels = wl->num_channels; 5831 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5825 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations; 5832 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
5826 wl->hw->wiphy->n_iface_combinations =
5827 ARRAY_SIZE(wlcore_iface_combinations);
5828 5833
5829 SET_IEEE80211_DEV(wl->hw, wl->dev); 5834 SET_IEEE80211_DEV(wl->hw, wl->dev);
5830 5835
@@ -5844,8 +5849,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5844 int i, j, ret; 5849 int i, j, ret;
5845 unsigned int order; 5850 unsigned int order;
5846 5851
5847 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5848
5849 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 5852 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5850 if (!hw) { 5853 if (!hw) {
5851 wl1271_error("could not alloc ieee80211_hw"); 5854 wl1271_error("could not alloc ieee80211_hw");
@@ -5867,8 +5870,12 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5867 5870
5868 wl->hw = hw; 5871 wl->hw = hw;
5869 5872
5873 /*
5874 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5875 * we don't allocate any additional resource here, so that's fine.
5876 */
5870 for (i = 0; i < NUM_TX_QUEUES; i++) 5877 for (i = 0; i < NUM_TX_QUEUES; i++)
5871 for (j = 0; j < WL12XX_MAX_LINKS; j++) 5878 for (j = 0; j < WLCORE_MAX_LINKS; j++)
5872 skb_queue_head_init(&wl->links[j].tx_queue[i]); 5879 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5873 5880
5874 skb_queue_head_init(&wl->deferred_rx_queue); 5881 skb_queue_head_init(&wl->deferred_rx_queue);
@@ -6011,7 +6018,8 @@ int wlcore_free_hw(struct wl1271 *wl)
6011 kfree(wl->nvs); 6018 kfree(wl->nvs);
6012 wl->nvs = NULL; 6019 wl->nvs = NULL;
6013 6020
6014 kfree(wl->fw_status_1); 6021 kfree(wl->raw_fw_status);
6022 kfree(wl->fw_status);
6015 kfree(wl->tx_res_if); 6023 kfree(wl->tx_res_if);
6016 destroy_workqueue(wl->freezable_wq); 6024 destroy_workqueue(wl->freezable_wq);
6017 6025
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 26bfc365ba70..b52516eed7b2 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -280,7 +280,11 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
280 struct ieee80211_sta *sta; 280 struct ieee80211_sta *sta;
281 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 281 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
282 282
283 if (test_bit(hlid, &wl->ap_ps_map)) 283 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
284 return;
285
286 if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
287 test_bit(hlid, &wl->ap_ps_map))
284 return; 288 return;
285 289
286 wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d " 290 wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 6791a1a6afba..e125974285cc 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -203,9 +203,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
203 return is_data; 203 return is_data;
204} 204}
205 205
206int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status) 206int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
207{ 207{
208 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 208 unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
209 u32 buf_size; 209 u32 buf_size;
210 u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc; 210 u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
211 u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc; 211 u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
@@ -263,12 +263,12 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
263 wl->aggr_buf + pkt_offset, 263 wl->aggr_buf + pkt_offset,
264 pkt_len, rx_align, 264 pkt_len, rx_align,
265 &hlid) == 1) { 265 &hlid) == 1) {
266 if (hlid < WL12XX_MAX_LINKS) 266 if (hlid < wl->num_links)
267 __set_bit(hlid, active_hlids); 267 __set_bit(hlid, active_hlids);
268 else 268 else
269 WARN(1, 269 WARN(1,
270 "hlid exceeded WL12XX_MAX_LINKS " 270 "hlid (%d) exceeded MAX_LINKS\n",
271 "(%d)\n", hlid); 271 hlid);
272 } 272 }
273 273
274 wl->rx_counter++; 274 wl->rx_counter++;
@@ -302,7 +302,7 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
302{ 302{
303 int ret; 303 int ret;
304 304
305 if (wl->rx_filter_enabled[index] == enable) { 305 if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
306 wl1271_warning("Request to enable an already " 306 wl1271_warning("Request to enable an already "
307 "enabled rx filter %d", index); 307 "enabled rx filter %d", index);
308 return 0; 308 return 0;
@@ -316,7 +316,10 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
316 return ret; 316 return ret;
317 } 317 }
318 318
319 wl->rx_filter_enabled[index] = enable; 319 if (enable)
320 __set_bit(index, wl->rx_filter_enabled);
321 else
322 __clear_bit(index, wl->rx_filter_enabled);
320 323
321 return 0; 324 return 0;
322} 325}
@@ -326,7 +329,7 @@ int wl1271_rx_filter_clear_all(struct wl1271 *wl)
326 int i, ret = 0; 329 int i, ret = 0;
327 330
328 for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) { 331 for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
329 if (!wl->rx_filter_enabled[i]) 332 if (!test_bit(i, wl->rx_filter_enabled))
330 continue; 333 continue;
331 ret = wl1271_rx_filter_enable(wl, i, 0, NULL); 334 ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
332 if (ret) 335 if (ret)
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 3363f60fb7da..a3b1618db27c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -142,7 +142,7 @@ struct wl1271_rx_descriptor {
142 u8 reserved; 142 u8 reserved;
143} __packed; 143} __packed;
144 144
145int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status); 145int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
146u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 146u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
147int wl1271_rx_filter_enable(struct wl1271 *wl, 147int wl1271_rx_filter_enable(struct wl1271 *wl,
148 int index, bool enable, 148 int index, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index b2c018dccf18..dbe826dd7c23 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -211,7 +211,7 @@ static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
211 u32 chunk_len; 211 u32 chunk_len;
212 212
213 while (len > 0) { 213 while (len > 0) {
214 chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len); 214 chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
215 215
216 cmd = &wl->buffer_cmd; 216 cmd = &wl->buffer_cmd;
217 busy_buf = wl->buffer_busyword; 217 busy_buf = wl->buffer_busyword;
@@ -285,7 +285,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
285 cmd = &commands[0]; 285 cmd = &commands[0];
286 i = 0; 286 i = 0;
287 while (len > 0) { 287 while (len > 0) {
288 chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len); 288 chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
289 289
290 *cmd = 0; 290 *cmd = 0;
291 *cmd |= WSPI_CMD_WRITE; 291 *cmd |= WSPI_CMD_WRITE;
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 8e583497940d..24dd288d6809 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -152,7 +152,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
152 } 152 }
153 153
154 /* Seeking is not supported - old logs are not kept. Disregard pos. */ 154 /* Seeking is not supported - old logs are not kept. Disregard pos. */
155 len = min(count, (size_t)wl->fwlog_size); 155 len = min_t(size_t, count, wl->fwlog_size);
156 wl->fwlog_size -= len; 156 wl->fwlog_size -= len;
157 memcpy(buffer, wl->fwlog, len); 157 memcpy(buffer, wl->fwlog, len);
158 158
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 87cd707affa2..40b43115f835 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -101,7 +101,7 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
101 * authentication response. this way it won't get de-authed by FW 101 * authentication response. this way it won't get de-authed by FW
102 * when transmitting too soon. 102 * when transmitting too soon.
103 */ 103 */
104 wl1271_acx_set_inconnection_sta(wl, hdr->addr1); 104 wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
105 105
106 /* 106 /*
107 * ROC for 1 second on the AP channel for completing the connection. 107 * ROC for 1 second on the AP channel for completing the connection.
@@ -134,12 +134,12 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
134 * into high-level PS and clean out its TX queues. 134 * into high-level PS and clean out its TX queues.
135 * Make an exception if this is the only connected link. In this 135 * Make an exception if this is the only connected link. In this
136 * case FW-memory congestion is less of a problem. 136 * case FW-memory congestion is less of a problem.
137 * Note that a single connected STA means 3 active links, since we must 137 * Note that a single connected STA means 2*ap_count + 1 active links,
138 * account for the global and broadcast AP links. The "fw_ps" check 138 * since we must account for the global and broadcast AP links
139 * assures us the third link is a STA connected to the AP. Otherwise 139 * for each AP. The "fw_ps" check assures us the other link is a STA
140 * the FW would not set the PSM bit. 140 * connected to the AP. Otherwise the FW would not set the PSM bit.
141 */ 141 */
142 if (wl->active_link_count > 3 && fw_ps && 142 if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
143 tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 143 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
144 wl12xx_ps_link_start(wl, wlvif, hlid, true); 144 wl12xx_ps_link_start(wl, wlvif, hlid, true);
145} 145}
@@ -234,8 +234,13 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
234 wl->tx_blocks_available -= total_blocks; 234 wl->tx_blocks_available -= total_blocks;
235 wl->tx_allocated_blocks += total_blocks; 235 wl->tx_allocated_blocks += total_blocks;
236 236
237 /* If the FW was empty before, arm the Tx watchdog */ 237 /*
238 if (wl->tx_allocated_blocks == total_blocks) 238 * If the FW was empty before, arm the Tx watchdog. Also do
239 * this on the first Tx after resume, as we always cancel the
240 * watchdog on suspend.
241 */
242 if (wl->tx_allocated_blocks == total_blocks ||
243 test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
239 wl12xx_rearm_tx_watchdog_locked(wl); 244 wl12xx_rearm_tx_watchdog_locked(wl);
240 245
241 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 246 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -357,6 +362,10 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
357 ieee80211_has_protected(frame_control)) 362 ieee80211_has_protected(frame_control))
358 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; 363 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
359 364
365 /* send EAPOL frames as voice */
366 if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
367 tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
368
360 desc->tx_attr = cpu_to_le16(tx_attr); 369 desc->tx_attr = cpu_to_le16(tx_attr);
361 370
362 wlcore_hw_set_tx_desc_csum(wl, desc, skb); 371 wlcore_hw_set_tx_desc_csum(wl, desc, skb);
@@ -560,11 +569,11 @@ static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
560 int i, h, start_hlid; 569 int i, h, start_hlid;
561 570
562 /* start from the link after the last one */ 571 /* start from the link after the last one */
563 start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS; 572 start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
564 573
565 /* dequeue according to AC, round robin on each link */ 574 /* dequeue according to AC, round robin on each link */
566 for (i = 0; i < WL12XX_MAX_LINKS; i++) { 575 for (i = 0; i < wl->num_links; i++) {
567 h = (start_hlid + i) % WL12XX_MAX_LINKS; 576 h = (start_hlid + i) % wl->num_links;
568 577
569 /* only consider connected stations */ 578 /* only consider connected stations */
570 if (!test_bit(h, wlvif->links_map)) 579 if (!test_bit(h, wlvif->links_map))
@@ -688,8 +697,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
688 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 697 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
689 698
690 /* make sure we dequeue the same packet next time */ 699 /* make sure we dequeue the same packet next time */
691 wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) % 700 wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
692 WL12XX_MAX_LINKS; 701 wl->num_links;
693 } 702 }
694 703
695 spin_lock_irqsave(&wl->wl_lock, flags); 704 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -722,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
722 timeout = wl->conf.rx_streaming.duration; 731 timeout = wl->conf.rx_streaming.duration;
723 wl12xx_for_each_wlvif_sta(wl, wlvif) { 732 wl12xx_for_each_wlvif_sta(wl, wlvif) {
724 bool found = false; 733 bool found = false;
725 for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) { 734 for_each_set_bit(hlid, active_hlids, wl->num_links) {
726 if (test_bit(hlid, wlvif->links_map)) { 735 if (test_bit(hlid, wlvif->links_map)) {
727 found = true; 736 found = true;
728 break; 737 break;
@@ -759,7 +768,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
759 struct wl1271_tx_hw_descr *desc; 768 struct wl1271_tx_hw_descr *desc;
760 u32 buf_offset = 0, last_len = 0; 769 u32 buf_offset = 0, last_len = 0;
761 bool sent_packets = false; 770 bool sent_packets = false;
762 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 771 unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
763 int ret = 0; 772 int ret = 0;
764 int bus_ret = 0; 773 int bus_ret = 0;
765 u8 hlid; 774 u8 hlid;
@@ -1061,7 +1070,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1061 int i; 1070 int i;
1062 1071
1063 /* TX failure */ 1072 /* TX failure */
1064 for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { 1073 for_each_set_bit(i, wlvif->links_map, wl->num_links) {
1065 if (wlvif->bss_type == BSS_TYPE_AP_BSS && 1074 if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
1066 i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) { 1075 i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
1067 /* this calls wl12xx_free_link */ 1076 /* this calls wl12xx_free_link */
@@ -1085,7 +1094,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
1085 1094
1086 /* only reset the queues if something bad happened */ 1095 /* only reset the queues if something bad happened */
1087 if (wl1271_tx_total_queue_count(wl) != 0) { 1096 if (wl1271_tx_total_queue_count(wl) != 0) {
1088 for (i = 0; i < WL12XX_MAX_LINKS; i++) 1097 for (i = 0; i < wl->num_links; i++)
1089 wl1271_tx_reset_link_queues(wl, i); 1098 wl1271_tx_reset_link_queues(wl, i);
1090 1099
1091 for (i = 0; i < NUM_TX_QUEUES; i++) 1100 for (i = 0; i < NUM_TX_QUEUES; i++)
@@ -1178,7 +1187,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
1178 WL1271_TX_FLUSH_TIMEOUT / 1000); 1187 WL1271_TX_FLUSH_TIMEOUT / 1000);
1179 1188
1180 /* forcibly flush all Tx buffers on our queues */ 1189 /* forcibly flush all Tx buffers on our queues */
1181 for (i = 0; i < WL12XX_MAX_LINKS; i++) 1190 for (i = 0; i < wl->num_links; i++)
1182 wl1271_tx_reset_link_queues(wl, i); 1191 wl1271_tx_reset_link_queues(wl, i);
1183 1192
1184out_wake: 1193out_wake:
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 35489c300da1..79cb3ff8b71f 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -37,6 +37,7 @@
37#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12) 37#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
38#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13) 38#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
39#define TX_HW_ATTR_HOST_ENCRYPT BIT(14) 39#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
40#define TX_HW_ATTR_EAPOL_FRAME BIT(15)
40 41
41#define TX_HW_ATTR_OFST_SAVE_RETRIES 0 42#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
42#define TX_HW_ATTR_OFST_HEADER_PAD 1 43#define TX_HW_ATTR_OFST_HEADER_PAD 1
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 06efc12a39e5..95a54504f0cc 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -73,6 +73,8 @@ struct wlcore_ops {
73 void (*tx_immediate_compl)(struct wl1271 *wl); 73 void (*tx_immediate_compl)(struct wl1271 *wl);
74 int (*hw_init)(struct wl1271 *wl); 74 int (*hw_init)(struct wl1271 *wl);
75 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif); 75 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
76 void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status,
77 struct wl_fw_status *fw_status);
76 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl, 78 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
77 struct wl12xx_vif *wlvif); 79 struct wl12xx_vif *wlvif);
78 int (*get_pg_ver)(struct wl1271 *wl, s8 *ver); 80 int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
@@ -220,7 +222,7 @@ struct wl1271 {
220 int channel; 222 int channel;
221 u8 system_hlid; 223 u8 system_hlid;
222 224
223 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; 225 unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
224 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; 226 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
225 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; 227 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
226 unsigned long rate_policies_map[ 228 unsigned long rate_policies_map[
@@ -228,7 +230,7 @@ struct wl1271 {
228 unsigned long klv_templates_map[ 230 unsigned long klv_templates_map[
229 BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)]; 231 BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
230 232
231 u8 session_ids[WL12XX_MAX_LINKS]; 233 u8 session_ids[WLCORE_MAX_LINKS];
232 234
233 struct list_head wlvif_list; 235 struct list_head wlvif_list;
234 236
@@ -346,8 +348,8 @@ struct wl1271 {
346 u32 buffer_cmd; 348 u32 buffer_cmd;
347 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 349 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
348 350
349 struct wl_fw_status_1 *fw_status_1; 351 void *raw_fw_status;
350 struct wl_fw_status_2 *fw_status_2; 352 struct wl_fw_status *fw_status;
351 struct wl1271_tx_hw_res_if *tx_res_if; 353 struct wl1271_tx_hw_res_if *tx_res_if;
352 354
353 /* Current chipset configuration */ 355 /* Current chipset configuration */
@@ -376,7 +378,7 @@ struct wl1271 {
376 * AP-mode - links indexed by HLID. The global and broadcast links 378 * AP-mode - links indexed by HLID. The global and broadcast links
377 * are always active. 379 * are always active.
378 */ 380 */
379 struct wl1271_link links[WL12XX_MAX_LINKS]; 381 struct wl1271_link links[WLCORE_MAX_LINKS];
380 382
381 /* number of currently active links */ 383 /* number of currently active links */
382 int active_link_count; 384 int active_link_count;
@@ -405,6 +407,9 @@ struct wl1271 {
405 /* AP-mode - number of currently connected stations */ 407 /* AP-mode - number of currently connected stations */
406 int active_sta_count; 408 int active_sta_count;
407 409
410 /* Flag determining whether AP should broadcast OFDM-only rates */
411 bool ofdm_only_ap;
412
408 /* last wlvif we transmitted from */ 413 /* last wlvif we transmitted from */
409 struct wl12xx_vif *last_wlvif; 414 struct wl12xx_vif *last_wlvif;
410 415
@@ -434,6 +439,10 @@ struct wl1271 {
434 u32 num_tx_desc; 439 u32 num_tx_desc;
435 /* number of RX descriptors the HW supports. */ 440 /* number of RX descriptors the HW supports. */
436 u32 num_rx_desc; 441 u32 num_rx_desc;
442 /* number of links the HW supports */
443 u8 num_links;
444 /* max stations a single AP can support */
445 u8 max_ap_stations;
437 446
438 /* translate HW Tx rates to standard rate-indices */ 447 /* translate HW Tx rates to standard rate-indices */
439 const u8 **band_rate_to_idx; 448 const u8 **band_rate_to_idx;
@@ -448,10 +457,11 @@ struct wl1271 {
448 struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS]; 457 struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
449 458
450 /* size of the private FW status data */ 459 /* size of the private FW status data */
460 size_t fw_status_len;
451 size_t fw_status_priv_len; 461 size_t fw_status_priv_len;
452 462
453 /* RX Data filter rule state - enabled/disabled */ 463 /* RX Data filter rule state - enabled/disabled */
454 bool rx_filter_enabled[WL1271_MAX_RX_FILTERS]; 464 unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)];
455 465
456 /* size of the private static data */ 466 /* size of the private static data */
457 size_t static_data_priv_len; 467 size_t static_data_priv_len;
@@ -476,8 +486,9 @@ struct wl1271 {
476 486
477 struct completion nvs_loading_complete; 487 struct completion nvs_loading_complete;
478 488
479 /* number of concurrent channels the HW supports */ 489 /* interface combinations supported by the hw */
480 u32 num_channels; 490 const struct ieee80211_iface_combination *iface_combinations;
491 u8 n_iface_combinations;
481}; 492};
482 493
483int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); 494int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index ce7261ce8b59..756e890bc5ee 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -58,10 +58,15 @@
58#define WL1271_DEFAULT_DTIM_PERIOD 1 58#define WL1271_DEFAULT_DTIM_PERIOD 1
59 59
60#define WL12XX_MAX_ROLES 4 60#define WL12XX_MAX_ROLES 4
61#define WL12XX_MAX_LINKS 12
62#define WL12XX_INVALID_ROLE_ID 0xff 61#define WL12XX_INVALID_ROLE_ID 0xff
63#define WL12XX_INVALID_LINK_ID 0xff 62#define WL12XX_INVALID_LINK_ID 0xff
64 63
64/*
65 * max number of links allowed by all HWs.
66 * this is NOT the actual max links supported by the current hw.
67 */
68#define WLCORE_MAX_LINKS 16
69
65/* the driver supports the 2.4Ghz and 5Ghz bands */ 70/* the driver supports the 2.4Ghz and 5Ghz bands */
66#define WLCORE_NUM_BANDS 2 71#define WLCORE_NUM_BANDS 2
67 72
@@ -118,72 +123,58 @@ struct wl1271_chip {
118 123
119#define NUM_TX_QUEUES 4 124#define NUM_TX_QUEUES 4
120 125
121#define AP_MAX_STATIONS 8 126struct wl_fw_status {
122 127 u32 intr;
123struct wl_fw_packet_counters {
124 /* Cumulative counter of released packets per AC */
125 u8 tx_released_pkts[NUM_TX_QUEUES];
126
127 /* Cumulative counter of freed packets per HLID */
128 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
129
130 /* Cumulative counter of released Voice memory blocks */
131 u8 tx_voice_released_blks;
132
133 /* Tx rate of the last transmitted packet */
134 u8 tx_last_rate;
135
136 u8 padding[2];
137} __packed;
138
139/* FW status registers */
140struct wl_fw_status_1 {
141 __le32 intr;
142 u8 fw_rx_counter; 128 u8 fw_rx_counter;
143 u8 drv_rx_counter; 129 u8 drv_rx_counter;
144 u8 reserved;
145 u8 tx_results_counter; 130 u8 tx_results_counter;
146 __le32 rx_pkt_descs[0]; 131 __le32 *rx_pkt_descs;
147} __packed;
148
149/*
150 * Each HW arch has a different number of Rx descriptors.
151 * The length of the status depends on it, since it holds an array
152 * of descriptors.
153 */
154#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
155 (sizeof(struct wl_fw_status_1) + \
156 (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
157 num_rx_desc)
158 132
159struct wl_fw_status_2 { 133 u32 fw_localtime;
160 __le32 fw_localtime;
161 134
162 /* 135 /*
163 * A bitmap (where each bit represents a single HLID) 136 * A bitmap (where each bit represents a single HLID)
164 * to indicate if the station is in PS mode. 137 * to indicate if the station is in PS mode.
165 */ 138 */
166 __le32 link_ps_bitmap; 139 u32 link_ps_bitmap;
167 140
168 /* 141 /*
169 * A bitmap (where each bit represents a single HLID) to indicate 142 * A bitmap (where each bit represents a single HLID) to indicate
170 * if the station is in Fast mode 143 * if the station is in Fast mode
171 */ 144 */
172 __le32 link_fast_bitmap; 145 u32 link_fast_bitmap;
173 146
174 /* Cumulative counter of total released mem blocks since FW-reset */ 147 /* Cumulative counter of total released mem blocks since FW-reset */
175 __le32 total_released_blks; 148 u32 total_released_blks;
176 149
177 /* Size (in Memory Blocks) of TX pool */ 150 /* Size (in Memory Blocks) of TX pool */
178 __le32 tx_total; 151 u32 tx_total;
152
153 struct {
154 /*
155 * Cumulative counter of released packets per AC
156 * (length of the array is NUM_TX_QUEUES)
157 */
158 u8 *tx_released_pkts;
179 159
180 struct wl_fw_packet_counters counters; 160 /*
161 * Cumulative counter of freed packets per HLID
162 * (length of the array is wl->num_links)
163 */
164 u8 *tx_lnk_free_pkts;
165
166 /* Cumulative counter of released Voice memory blocks */
167 u8 tx_voice_released_blks;
181 168
182 __le32 log_start_addr; 169 /* Tx rate of the last transmitted packet */
170 u8 tx_last_rate;
171 } counters;
172
173 u32 log_start_addr;
183 174
184 /* Private status to be used by the lower drivers */ 175 /* Private status to be used by the lower drivers */
185 u8 priv[0]; 176 void *priv;
186} __packed; 177};
187 178
188#define WL1271_MAX_CHANNELS 64 179#define WL1271_MAX_CHANNELS 64
189struct wl1271_scan { 180struct wl1271_scan {
@@ -240,6 +231,7 @@ enum wl12xx_flags {
240 WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, 231 WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
241 WL1271_FLAG_INTENDED_FW_RECOVERY, 232 WL1271_FLAG_INTENDED_FW_RECOVERY,
242 WL1271_FLAG_IO_FAILED, 233 WL1271_FLAG_IO_FAILED,
234 WL1271_FLAG_REINIT_TX_WDOG,
243}; 235};
244 236
245enum wl12xx_vif_flags { 237enum wl12xx_vif_flags {
@@ -368,7 +360,7 @@ struct wl12xx_vif {
368 360
369 /* HLIDs bitmap of associated stations */ 361 /* HLIDs bitmap of associated stations */
370 unsigned long sta_hlid_map[BITS_TO_LONGS( 362 unsigned long sta_hlid_map[BITS_TO_LONGS(
371 WL12XX_MAX_LINKS)]; 363 WLCORE_MAX_LINKS)];
372 364
373 /* recoreded keys - set here before AP startup */ 365 /* recoreded keys - set here before AP startup */
374 struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS]; 366 struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
@@ -385,7 +377,7 @@ struct wl12xx_vif {
385 /* counters of packets per AC, across all links in the vif */ 377 /* counters of packets per AC, across all links in the vif */
386 int tx_queue_count[NUM_TX_QUEUES]; 378 int tx_queue_count[NUM_TX_QUEUES];
387 379
388 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; 380 unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
389 381
390 u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; 382 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
391 u8 ssid_len; 383 u8 ssid_len;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d24d4a958c67..d5c371d77ddf 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -42,8 +42,7 @@
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/string.h> 43#include <linux/string.h>
44#include <linux/wireless.h> 44#include <linux/wireless.h>
45#include <linux/ieee80211.h> 45#include <net/cfg80211.h>
46#include <linux/etherdevice.h>
47 46
48#include <net/iw_handler.h> 47#include <net/iw_handler.h>
49 48
@@ -1454,7 +1453,8 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
1454{ 1453{
1455 struct wl3501_card *this = netdev_priv(dev); 1454 struct wl3501_card *this = netdev_priv(dev);
1456 1455
1457 wrqu->freq.m = ieee80211_dsss_chan_to_freq(this->chan) * 100000; 1456 wrqu->freq.m = 100000 *
1457 ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
1458 wrqu->freq.e = 1; 1458 wrqu->freq.e = 1;
1459 return 0; 1459 return 0;
1460} 1460}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index d39c4178c33a..6f5c793a7855 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -18,7 +18,7 @@
18#include <linux/netdevice.h> 18#include <linux/netdevice.h>
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/wireless.h> 20#include <linux/wireless.h>
21#include <linux/ieee80211.h> 21#include <net/cfg80211.h>
22#include <net/iw_handler.h> 22#include <net/iw_handler.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/if_arp.h> 24#include <linux/if_arp.h>
@@ -914,11 +914,8 @@ static int zd1201_set_freq(struct net_device *dev,
914 914
915 if (freq->e == 0) 915 if (freq->e == 0)
916 channel = freq->m; 916 channel = freq->m;
917 else { 917 else
918 channel = ieee80211_freq_to_dsss_chan(freq->m); 918 channel = ieee80211_frequency_to_channel(freq->m);
919 if (channel < 0)
920 channel = 0;
921 }
922 919
923 err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel); 920 err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
924 if (err) 921 if (err)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae413a2cbee7..89d1d0556b6e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,37 +48,19 @@
48typedef unsigned int pending_ring_idx_t; 48typedef unsigned int pending_ring_idx_t;
49#define INVALID_PENDING_RING_IDX (~0U) 49#define INVALID_PENDING_RING_IDX (~0U)
50 50
51/* For the head field in pending_tx_info: it is used to indicate
52 * whether this tx info is the head of one or more coalesced requests.
53 *
54 * When head != INVALID_PENDING_RING_IDX, it means the start of a new
55 * tx requests queue and the end of previous queue.
56 *
57 * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
58 *
59 * ...|0 I I I|5 I|9 I I I|...
60 * -->|<-INUSE----------------
61 *
62 * After consuming the first slot(s) we have:
63 *
64 * ...|V V V V|5 I|9 I I I|...
65 * -----FREE->|<-INUSE--------
66 *
67 * where V stands for "valid pending ring index". Any number other
68 * than INVALID_PENDING_RING_IDX is OK. These entries are considered
69 * free and can contain any number other than
70 * INVALID_PENDING_RING_IDX. In practice we use 0.
71 *
72 * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
73 * above example) number is the index into pending_tx_info and
74 * mmap_pages arrays.
75 */
76struct pending_tx_info { 51struct pending_tx_info {
77 struct xen_netif_tx_request req; /* coalesced tx request */ 52 struct xen_netif_tx_request req; /* tx request */
78 pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX 53 /* Callback data for released SKBs. The callback is always
79 * if it is head of one or more tx 54 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
80 * reqs 55 * also an index in pending_tx_info array. It is initialized in
81 */ 56 * xenvif_alloc and it never changes.
57 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
58 * callback_struct in this array of struct pending_tx_info's, then ctx
59 * to the next, or NULL if there is no more slot for this skb.
60 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
61 * to this field.
62 */
63 struct ubuf_info callback_struct;
82}; 64};
83 65
84#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) 66#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
@@ -99,7 +81,7 @@ struct xenvif_rx_meta {
99 81
100#define MAX_BUFFER_OFFSET PAGE_SIZE 82#define MAX_BUFFER_OFFSET PAGE_SIZE
101 83
102#define MAX_PENDING_REQS 256 84#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
103 85
104/* It's possible for an skb to have a maximal number of frags 86/* It's possible for an skb to have a maximal number of frags
105 * but still be less than MAX_BUFFER_OFFSET in size. Thus the 87 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
@@ -108,11 +90,25 @@ struct xenvif_rx_meta {
108 */ 90 */
109#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) 91#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
110 92
93#define NETBACK_INVALID_HANDLE -1
94
95/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
96 * the maximum slots a valid packet can use. Now this value is defined
97 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
98 * all backend.
99 */
100#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
101
111struct xenvif { 102struct xenvif {
112 /* Unique identifier for this interface. */ 103 /* Unique identifier for this interface. */
113 domid_t domid; 104 domid_t domid;
114 unsigned int handle; 105 unsigned int handle;
115 106
107 /* Is this interface disabled? True when backend discovers
108 * frontend is rogue.
109 */
110 bool disabled;
111
116 /* Use NAPI for guest TX */ 112 /* Use NAPI for guest TX */
117 struct napi_struct napi; 113 struct napi_struct napi;
118 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ 114 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -126,13 +122,26 @@ struct xenvif {
126 pending_ring_idx_t pending_cons; 122 pending_ring_idx_t pending_cons;
127 u16 pending_ring[MAX_PENDING_REQS]; 123 u16 pending_ring[MAX_PENDING_REQS];
128 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; 124 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
129 125 grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
130 /* Coalescing tx requests before copying makes number of grant 126
131 * copy ops greater or equal to number of slots required. In 127 struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
132 * worst case a tx request consumes 2 gnttab_copy. 128 struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
129 /* passed to gnttab_[un]map_refs with pages under (un)mapping */
130 struct page *pages_to_map[MAX_PENDING_REQS];
131 struct page *pages_to_unmap[MAX_PENDING_REQS];
132
133 /* This prevents zerocopy callbacks to race over dealloc_ring */
134 spinlock_t callback_lock;
135 /* This prevents dealloc thread and NAPI instance to race over response
136 * creation and pending_ring in xenvif_idx_release. In xenvif_tx_err
137 * it only protect response creation
133 */ 138 */
134 struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; 139 spinlock_t response_lock;
135 140 pending_ring_idx_t dealloc_prod;
141 pending_ring_idx_t dealloc_cons;
142 u16 dealloc_ring[MAX_PENDING_REQS];
143 struct task_struct *dealloc_task;
144 wait_queue_head_t dealloc_wq;
136 145
137 /* Use kthread for guest RX */ 146 /* Use kthread for guest RX */
138 struct task_struct *task; 147 struct task_struct *task;
@@ -144,6 +153,9 @@ struct xenvif {
144 struct xen_netif_rx_back_ring rx; 153 struct xen_netif_rx_back_ring rx;
145 struct sk_buff_head rx_queue; 154 struct sk_buff_head rx_queue;
146 RING_IDX rx_last_skb_slots; 155 RING_IDX rx_last_skb_slots;
156 bool rx_queue_purge;
157
158 struct timer_list wake_queue;
147 159
148 /* This array is allocated seperately as it is large */ 160 /* This array is allocated seperately as it is large */
149 struct gnttab_copy *grant_copy_op; 161 struct gnttab_copy *grant_copy_op;
@@ -175,6 +187,10 @@ struct xenvif {
175 187
176 /* Statistics */ 188 /* Statistics */
177 unsigned long rx_gso_checksum_fixup; 189 unsigned long rx_gso_checksum_fixup;
190 unsigned long tx_zerocopy_sent;
191 unsigned long tx_zerocopy_success;
192 unsigned long tx_zerocopy_fail;
193 unsigned long tx_frag_overflow;
178 194
179 /* Miscellaneous private stuff. */ 195 /* Miscellaneous private stuff. */
180 struct net_device *dev; 196 struct net_device *dev;
@@ -216,9 +232,11 @@ void xenvif_carrier_off(struct xenvif *vif);
216 232
217int xenvif_tx_action(struct xenvif *vif, int budget); 233int xenvif_tx_action(struct xenvif *vif, int budget);
218 234
219int xenvif_kthread(void *data); 235int xenvif_kthread_guest_rx(void *data);
220void xenvif_kick_thread(struct xenvif *vif); 236void xenvif_kick_thread(struct xenvif *vif);
221 237
238int xenvif_dealloc_kthread(void *data);
239
222/* Determine whether the needed number of slots (req) are available, 240/* Determine whether the needed number of slots (req) are available,
223 * and set req_event if not. 241 * and set req_event if not.
224 */ 242 */
@@ -226,6 +244,24 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
226 244
227void xenvif_stop_queue(struct xenvif *vif); 245void xenvif_stop_queue(struct xenvif *vif);
228 246
247/* Callback from stack when TX packet can be released */
248void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
249
250/* Unmap a pending page and release it back to the guest */
251void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
252
253static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
254{
255 return MAX_PENDING_REQS -
256 vif->pending_prod + vif->pending_cons;
257}
258
259/* Callback from stack when TX packet can be released */
260void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
261
229extern bool separate_tx_rx_irq; 262extern bool separate_tx_rx_irq;
230 263
264extern unsigned int rx_drain_timeout_msecs;
265extern unsigned int rx_drain_timeout_jiffies;
266
231#endif /* __XEN_NETBACK__COMMON_H__ */ 267#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 301cc037fda8..ef05c5c49d41 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -38,6 +38,7 @@
38 38
39#include <xen/events.h> 39#include <xen/events.h>
40#include <asm/xen/hypercall.h> 40#include <asm/xen/hypercall.h>
41#include <xen/balloon.h>
41 42
42#define XENVIF_QUEUE_LENGTH 32 43#define XENVIF_QUEUE_LENGTH 32
43#define XENVIF_NAPI_WEIGHT 64 44#define XENVIF_NAPI_WEIGHT 64
@@ -62,6 +63,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
62 struct xenvif *vif = container_of(napi, struct xenvif, napi); 63 struct xenvif *vif = container_of(napi, struct xenvif, napi);
63 int work_done; 64 int work_done;
64 65
66 /* This vif is rogue, we pretend we've there is nothing to do
67 * for this vif to deschedule it from NAPI. But this interface
68 * will be turned off in thread context later.
69 */
70 if (unlikely(vif->disabled)) {
71 napi_complete(napi);
72 return 0;
73 }
74
65 work_done = xenvif_tx_action(vif, budget); 75 work_done = xenvif_tx_action(vif, budget);
66 76
67 if (work_done < budget) { 77 if (work_done < budget) {
@@ -113,6 +123,18 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
113 return IRQ_HANDLED; 123 return IRQ_HANDLED;
114} 124}
115 125
126static void xenvif_wake_queue(unsigned long data)
127{
128 struct xenvif *vif = (struct xenvif *)data;
129
130 if (netif_queue_stopped(vif->dev)) {
131 netdev_err(vif->dev, "draining TX queue\n");
132 vif->rx_queue_purge = true;
133 xenvif_kick_thread(vif);
134 netif_wake_queue(vif->dev);
135 }
136}
137
116static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 138static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
117{ 139{
118 struct xenvif *vif = netdev_priv(dev); 140 struct xenvif *vif = netdev_priv(dev);
@@ -121,7 +143,9 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
121 BUG_ON(skb->dev != dev); 143 BUG_ON(skb->dev != dev);
122 144
123 /* Drop the packet if vif is not ready */ 145 /* Drop the packet if vif is not ready */
124 if (vif->task == NULL || !xenvif_schedulable(vif)) 146 if (vif->task == NULL ||
147 vif->dealloc_task == NULL ||
148 !xenvif_schedulable(vif))
125 goto drop; 149 goto drop;
126 150
127 /* At best we'll need one slot for the header and one for each 151 /* At best we'll need one slot for the header and one for each
@@ -139,8 +163,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
139 * then turn off the queue to give the ring a chance to 163 * then turn off the queue to give the ring a chance to
140 * drain. 164 * drain.
141 */ 165 */
142 if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) 166 if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
167 vif->wake_queue.function = xenvif_wake_queue;
168 vif->wake_queue.data = (unsigned long)vif;
143 xenvif_stop_queue(vif); 169 xenvif_stop_queue(vif);
170 mod_timer(&vif->wake_queue,
171 jiffies + rx_drain_timeout_jiffies);
172 }
144 173
145 skb_queue_tail(&vif->rx_queue, skb); 174 skb_queue_tail(&vif->rx_queue, skb);
146 xenvif_kick_thread(vif); 175 xenvif_kick_thread(vif);
@@ -233,6 +262,28 @@ static const struct xenvif_stat {
233 "rx_gso_checksum_fixup", 262 "rx_gso_checksum_fixup",
234 offsetof(struct xenvif, rx_gso_checksum_fixup) 263 offsetof(struct xenvif, rx_gso_checksum_fixup)
235 }, 264 },
265 /* If (sent != success + fail), there are probably packets never
266 * freed up properly!
267 */
268 {
269 "tx_zerocopy_sent",
270 offsetof(struct xenvif, tx_zerocopy_sent),
271 },
272 {
273 "tx_zerocopy_success",
274 offsetof(struct xenvif, tx_zerocopy_success),
275 },
276 {
277 "tx_zerocopy_fail",
278 offsetof(struct xenvif, tx_zerocopy_fail)
279 },
280 /* Number of packets exceeding MAX_SKB_FRAG slots. You should use
281 * a guest with the same MAX_SKB_FRAG
282 */
283 {
284 "tx_frag_overflow",
285 offsetof(struct xenvif, tx_frag_overflow)
286 },
236}; 287};
237 288
238static int xenvif_get_sset_count(struct net_device *dev, int string_set) 289static int xenvif_get_sset_count(struct net_device *dev, int string_set)
@@ -321,11 +372,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
321 vif->ip_csum = 1; 372 vif->ip_csum = 1;
322 vif->dev = dev; 373 vif->dev = dev;
323 374
375 vif->disabled = false;
376
324 vif->credit_bytes = vif->remaining_credit = ~0UL; 377 vif->credit_bytes = vif->remaining_credit = ~0UL;
325 vif->credit_usec = 0UL; 378 vif->credit_usec = 0UL;
326 init_timer(&vif->credit_timeout); 379 init_timer(&vif->credit_timeout);
327 vif->credit_window_start = get_jiffies_64(); 380 vif->credit_window_start = get_jiffies_64();
328 381
382 init_timer(&vif->wake_queue);
383
329 dev->netdev_ops = &xenvif_netdev_ops; 384 dev->netdev_ops = &xenvif_netdev_ops;
330 dev->hw_features = NETIF_F_SG | 385 dev->hw_features = NETIF_F_SG |
331 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 386 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -342,8 +397,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
342 vif->pending_prod = MAX_PENDING_REQS; 397 vif->pending_prod = MAX_PENDING_REQS;
343 for (i = 0; i < MAX_PENDING_REQS; i++) 398 for (i = 0; i < MAX_PENDING_REQS; i++)
344 vif->pending_ring[i] = i; 399 vif->pending_ring[i] = i;
345 for (i = 0; i < MAX_PENDING_REQS; i++) 400 spin_lock_init(&vif->callback_lock);
346 vif->mmap_pages[i] = NULL; 401 spin_lock_init(&vif->response_lock);
402 /* If ballooning is disabled, this will consume real memory, so you
403 * better enable it. The long term solution would be to use just a
404 * bunch of valid page descriptors, without dependency on ballooning
405 */
406 err = alloc_xenballooned_pages(MAX_PENDING_REQS,
407 vif->mmap_pages,
408 false);
409 if (err) {
410 netdev_err(dev, "Could not reserve mmap_pages\n");
411 return ERR_PTR(-ENOMEM);
412 }
413 for (i = 0; i < MAX_PENDING_REQS; i++) {
414 vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
415 { .callback = xenvif_zerocopy_callback,
416 .ctx = NULL,
417 .desc = i };
418 vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
419 }
347 420
348 /* 421 /*
349 * Initialise a dummy MAC address. We choose the numerically 422 * Initialise a dummy MAC address. We choose the numerically
@@ -381,12 +454,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
381 454
382 BUG_ON(vif->tx_irq); 455 BUG_ON(vif->tx_irq);
383 BUG_ON(vif->task); 456 BUG_ON(vif->task);
457 BUG_ON(vif->dealloc_task);
384 458
385 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 459 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
386 if (err < 0) 460 if (err < 0)
387 goto err; 461 goto err;
388 462
389 init_waitqueue_head(&vif->wq); 463 init_waitqueue_head(&vif->wq);
464 init_waitqueue_head(&vif->dealloc_wq);
390 465
391 if (tx_evtchn == rx_evtchn) { 466 if (tx_evtchn == rx_evtchn) {
392 /* feature-split-event-channels == 0 */ 467 /* feature-split-event-channels == 0 */
@@ -420,8 +495,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
420 disable_irq(vif->rx_irq); 495 disable_irq(vif->rx_irq);
421 } 496 }
422 497
423 task = kthread_create(xenvif_kthread, 498 task = kthread_create(xenvif_kthread_guest_rx,
424 (void *)vif, "%s", vif->dev->name); 499 (void *)vif, "%s-guest-rx", vif->dev->name);
425 if (IS_ERR(task)) { 500 if (IS_ERR(task)) {
426 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 501 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
427 err = PTR_ERR(task); 502 err = PTR_ERR(task);
@@ -430,6 +505,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
430 505
431 vif->task = task; 506 vif->task = task;
432 507
508 task = kthread_create(xenvif_dealloc_kthread,
509 (void *)vif, "%s-dealloc", vif->dev->name);
510 if (IS_ERR(task)) {
511 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
512 err = PTR_ERR(task);
513 goto err_rx_unbind;
514 }
515
516 vif->dealloc_task = task;
517
433 rtnl_lock(); 518 rtnl_lock();
434 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) 519 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
435 dev_set_mtu(vif->dev, ETH_DATA_LEN); 520 dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -440,6 +525,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
440 rtnl_unlock(); 525 rtnl_unlock();
441 526
442 wake_up_process(vif->task); 527 wake_up_process(vif->task);
528 wake_up_process(vif->dealloc_task);
443 529
444 return 0; 530 return 0;
445 531
@@ -473,10 +559,16 @@ void xenvif_disconnect(struct xenvif *vif)
473 xenvif_carrier_off(vif); 559 xenvif_carrier_off(vif);
474 560
475 if (vif->task) { 561 if (vif->task) {
562 del_timer_sync(&vif->wake_queue);
476 kthread_stop(vif->task); 563 kthread_stop(vif->task);
477 vif->task = NULL; 564 vif->task = NULL;
478 } 565 }
479 566
567 if (vif->dealloc_task) {
568 kthread_stop(vif->dealloc_task);
569 vif->dealloc_task = NULL;
570 }
571
480 if (vif->tx_irq) { 572 if (vif->tx_irq) {
481 if (vif->tx_irq == vif->rx_irq) 573 if (vif->tx_irq == vif->rx_irq)
482 unbind_from_irqhandler(vif->tx_irq, vif); 574 unbind_from_irqhandler(vif->tx_irq, vif);
@@ -492,6 +584,43 @@ void xenvif_disconnect(struct xenvif *vif)
492 584
493void xenvif_free(struct xenvif *vif) 585void xenvif_free(struct xenvif *vif)
494{ 586{
587 int i, unmap_timeout = 0;
588 /* Here we want to avoid timeout messages if an skb can be legitimately
589 * stuck somewhere else. Realistically this could be an another vif's
590 * internal or QDisc queue. That another vif also has this
591 * rx_drain_timeout_msecs timeout, but the timer only ditches the
592 * internal queue. After that, the QDisc queue can put in worst case
593 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
594 * internal queue, so we need several rounds of such timeouts until we
595 * can be sure that no another vif should have skb's from us. We are
596 * not sending more skb's, so newly stuck packets are not interesting
597 * for us here.
598 */
599 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
600 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
601
602 for (i = 0; i < MAX_PENDING_REQS; ++i) {
603 if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
604 unmap_timeout++;
605 schedule_timeout(msecs_to_jiffies(1000));
606 if (unmap_timeout > worst_case_skb_lifetime &&
607 net_ratelimit())
608 netdev_err(vif->dev,
609 "Page still granted! Index: %x\n",
610 i);
611 /* If there are still unmapped pages, reset the loop to
612 * start checking again. We shouldn't exit here until
613 * dealloc thread and NAPI instance release all the
614 * pages. If a kernel bug causes the skbs to stall
615 * somewhere, the interface cannot be brought down
616 * properly.
617 */
618 i = -1;
619 }
620 }
621
622 free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
623
495 netif_napi_del(&vif->napi); 624 netif_napi_del(&vif->napi);
496 625
497 unregister_netdev(vif->dev); 626 unregister_netdev(vif->dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 438d0c09b7e6..3f021e054ba1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -37,6 +37,7 @@
37#include <linux/kthread.h> 37#include <linux/kthread.h>
38#include <linux/if_vlan.h> 38#include <linux/if_vlan.h>
39#include <linux/udp.h> 39#include <linux/udp.h>
40#include <linux/highmem.h>
40 41
41#include <net/tcp.h> 42#include <net/tcp.h>
42 43
@@ -54,6 +55,13 @@
54bool separate_tx_rx_irq = 1; 55bool separate_tx_rx_irq = 1;
55module_param(separate_tx_rx_irq, bool, 0644); 56module_param(separate_tx_rx_irq, bool, 0644);
56 57
58/* When guest ring is filled up, qdisc queues the packets for us, but we have
59 * to timeout them, otherwise other guests' packets can get stuck there
60 */
61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444);
63unsigned int rx_drain_timeout_jiffies;
64
57/* 65/*
58 * This is the maximum slots a skb can have. If a guest sends a skb 66 * This is the maximum slots a skb can have. If a guest sends a skb
59 * which exceeds this limit it is considered malicious. 67 * which exceeds this limit it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
62static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; 70static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
63module_param(fatal_skb_slots, uint, 0444); 71module_param(fatal_skb_slots, uint, 0444);
64 72
65/*
66 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
67 * the maximum slots a valid packet can use. Now this value is defined
68 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
69 * all backend.
70 */
71#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
72
73/*
74 * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
75 * one or more merged tx requests, otherwise it is the continuation of
76 * previous tx request.
77 */
78static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
79{
80 return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
81}
82
83static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 73static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
84 u8 status); 74 u8 status);
85 75
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
109 return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); 99 return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
110} 100}
111 101
102#define callback_param(vif, pending_idx) \
103 (vif->pending_tx_info[pending_idx].callback_struct)
104
105/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */
107static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
108{
109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp =
111 container_of(ubuf, struct pending_tx_info, callback_struct);
112 return container_of(temp - pending_idx,
113 struct xenvif,
114 pending_tx_info[0]);
115}
116
112/* This is a miniumum size for the linear area to avoid lots of 117/* This is a miniumum size for the linear area to avoid lots of
113 * calls to __pskb_pull_tail() as we set up checksum offsets. The 118 * calls to __pskb_pull_tail() as we set up checksum offsets. The
114 * value 128 was chosen as it covers all IPv4 and most likely 119 * value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
131 return i & (MAX_PENDING_REQS-1); 136 return i & (MAX_PENDING_REQS-1);
132} 137}
133 138
134static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
135{
136 return MAX_PENDING_REQS -
137 vif->pending_prod + vif->pending_cons;
138}
139
140bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) 139bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
141{ 140{
142 RING_IDX prod, cons; 141 RING_IDX prod, cons;
@@ -192,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
192 * into multiple copies tend to give large frags their 191 * into multiple copies tend to give large frags their
193 * own buffers as before. 192 * own buffers as before.
194 */ 193 */
195 if ((offset + size > MAX_BUFFER_OFFSET) && 194 BUG_ON(size > MAX_BUFFER_OFFSET);
196 (size <= MAX_BUFFER_OFFSET) && offset && !head) 195 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
197 return true; 196 return true;
198 197
199 return false; 198 return false;
@@ -235,7 +234,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
235static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, 234static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
236 struct netrx_pending_operations *npo, 235 struct netrx_pending_operations *npo,
237 struct page *page, unsigned long size, 236 struct page *page, unsigned long size,
238 unsigned long offset, int *head) 237 unsigned long offset, int *head,
238 struct xenvif *foreign_vif,
239 grant_ref_t foreign_gref)
239{ 240{
240 struct gnttab_copy *copy_gop; 241 struct gnttab_copy *copy_gop;
241 struct xenvif_rx_meta *meta; 242 struct xenvif_rx_meta *meta;
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
277 copy_gop->flags = GNTCOPY_dest_gref; 278 copy_gop->flags = GNTCOPY_dest_gref;
278 copy_gop->len = bytes; 279 copy_gop->len = bytes;
279 280
280 copy_gop->source.domid = DOMID_SELF; 281 if (foreign_vif) {
281 copy_gop->source.u.gmfn = virt_to_mfn(page_address(page)); 282 copy_gop->source.domid = foreign_vif->domid;
283 copy_gop->source.u.ref = foreign_gref;
284 copy_gop->flags |= GNTCOPY_source_gref;
285 } else {
286 copy_gop->source.domid = DOMID_SELF;
287 copy_gop->source.u.gmfn =
288 virt_to_mfn(page_address(page));
289 }
282 copy_gop->source.offset = offset; 290 copy_gop->source.offset = offset;
283 291
284 copy_gop->dest.domid = vif->domid; 292 copy_gop->dest.domid = vif->domid;
@@ -338,6 +346,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
338 int head = 1; 346 int head = 1;
339 int old_meta_prod; 347 int old_meta_prod;
340 int gso_type; 348 int gso_type;
349 struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
350 grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
351 struct xenvif *foreign_vif = NULL;
341 352
342 old_meta_prod = npo->meta_prod; 353 old_meta_prod = npo->meta_prod;
343 354
@@ -375,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
375 npo->copy_off = 0; 386 npo->copy_off = 0;
376 npo->copy_gref = req->gref; 387 npo->copy_gref = req->gref;
377 388
389 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
390 (ubuf->callback == &xenvif_zerocopy_callback)) {
391 int i = 0;
392 foreign_vif = ubuf_to_vif(ubuf);
393
394 do {
395 u16 pending_idx = ubuf->desc;
396 foreign_grefs[i++] =
397 foreign_vif->pending_tx_info[pending_idx].req.gref;
398 ubuf = (struct ubuf_info *) ubuf->ctx;
399 } while (ubuf);
400 }
401
378 data = skb->data; 402 data = skb->data;
379 while (data < skb_tail_pointer(skb)) { 403 while (data < skb_tail_pointer(skb)) {
380 unsigned int offset = offset_in_page(data); 404 unsigned int offset = offset_in_page(data);
@@ -384,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
384 len = skb_tail_pointer(skb) - data; 408 len = skb_tail_pointer(skb) - data;
385 409
386 xenvif_gop_frag_copy(vif, skb, npo, 410 xenvif_gop_frag_copy(vif, skb, npo,
387 virt_to_page(data), len, offset, &head); 411 virt_to_page(data), len, offset, &head,
412 NULL,
413 0);
388 data += len; 414 data += len;
389 } 415 }
390 416
@@ -393,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
393 skb_frag_page(&skb_shinfo(skb)->frags[i]), 419 skb_frag_page(&skb_shinfo(skb)->frags[i]),
394 skb_frag_size(&skb_shinfo(skb)->frags[i]), 420 skb_frag_size(&skb_shinfo(skb)->frags[i]),
395 skb_shinfo(skb)->frags[i].page_offset, 421 skb_shinfo(skb)->frags[i].page_offset,
396 &head); 422 &head,
423 foreign_vif,
424 foreign_grefs[i]);
397 } 425 }
398 426
399 return npo->meta_prod - old_meta_prod; 427 return npo->meta_prod - old_meta_prod;
@@ -451,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
451 } 479 }
452} 480}
453 481
454struct skb_cb_overlay { 482struct xenvif_rx_cb {
455 int meta_slots_used; 483 int meta_slots_used;
456}; 484};
457 485
486#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
487
458void xenvif_kick_thread(struct xenvif *vif) 488void xenvif_kick_thread(struct xenvif *vif)
459{ 489{
460 wake_up(&vif->wq); 490 wake_up(&vif->wq);
@@ -470,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
470 LIST_HEAD(notify); 500 LIST_HEAD(notify);
471 int ret; 501 int ret;
472 unsigned long offset; 502 unsigned long offset;
473 struct skb_cb_overlay *sco;
474 bool need_to_notify = false; 503 bool need_to_notify = false;
475 504
476 struct netrx_pending_operations npo = { 505 struct netrx_pending_operations npo = {
@@ -482,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
482 511
483 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { 512 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
484 RING_IDX max_slots_needed; 513 RING_IDX max_slots_needed;
514 RING_IDX old_req_cons;
515 RING_IDX ring_slots_used;
485 int i; 516 int i;
486 517
487 /* We need a cheap worse case estimate for the number of 518 /* We need a cheap worse case estimate for the number of
@@ -493,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
493 PAGE_SIZE); 524 PAGE_SIZE);
494 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 525 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
495 unsigned int size; 526 unsigned int size;
527 unsigned int offset;
528
496 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 529 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
497 max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); 530 offset = skb_shinfo(skb)->frags[i].page_offset;
531
532 /* For a worse-case estimate we need to factor in
533 * the fragment page offset as this will affect the
534 * number of times xenvif_gop_frag_copy() will
535 * call start_new_rx_buffer().
536 */
537 max_slots_needed += DIV_ROUND_UP(offset + size,
538 PAGE_SIZE);
498 } 539 }
540
541 /* To avoid the estimate becoming too pessimal for some
542 * frontends that limit posted rx requests, cap the estimate
543 * at MAX_SKB_FRAGS.
544 */
545 if (max_slots_needed > MAX_SKB_FRAGS)
546 max_slots_needed = MAX_SKB_FRAGS;
547
548 /* We may need one more slot for GSO metadata */
499 if (skb_is_gso(skb) && 549 if (skb_is_gso(skb) &&
500 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || 550 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
501 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) 551 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
@@ -510,9 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
510 } else 560 } else
511 vif->rx_last_skb_slots = 0; 561 vif->rx_last_skb_slots = 0;
512 562
513 sco = (struct skb_cb_overlay *)skb->cb; 563 old_req_cons = vif->rx.req_cons;
514 sco->meta_slots_used = xenvif_gop_skb(skb, &npo); 564 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
515 BUG_ON(sco->meta_slots_used > max_slots_needed); 565 ring_slots_used = vif->rx.req_cons - old_req_cons;
566
567 BUG_ON(ring_slots_used > max_slots_needed);
516 568
517 __skb_queue_tail(&rxq, skb); 569 __skb_queue_tail(&rxq, skb);
518 } 570 }
@@ -526,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
526 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 578 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
527 579
528 while ((skb = __skb_dequeue(&rxq)) != NULL) { 580 while ((skb = __skb_dequeue(&rxq)) != NULL) {
529 sco = (struct skb_cb_overlay *)skb->cb;
530 581
531 if ((1 << vif->meta[npo.meta_cons].gso_type) & 582 if ((1 << vif->meta[npo.meta_cons].gso_type) &
532 vif->gso_prefix_mask) { 583 vif->gso_prefix_mask) {
@@ -537,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
537 588
538 resp->offset = vif->meta[npo.meta_cons].gso_size; 589 resp->offset = vif->meta[npo.meta_cons].gso_size;
539 resp->id = vif->meta[npo.meta_cons].id; 590 resp->id = vif->meta[npo.meta_cons].id;
540 resp->status = sco->meta_slots_used; 591 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
541 592
542 npo.meta_cons++; 593 npo.meta_cons++;
543 sco->meta_slots_used--; 594 XENVIF_RX_CB(skb)->meta_slots_used--;
544 } 595 }
545 596
546 597
547 vif->dev->stats.tx_bytes += skb->len; 598 vif->dev->stats.tx_bytes += skb->len;
548 vif->dev->stats.tx_packets++; 599 vif->dev->stats.tx_packets++;
549 600
550 status = xenvif_check_gop(vif, sco->meta_slots_used, &npo); 601 status = xenvif_check_gop(vif,
602 XENVIF_RX_CB(skb)->meta_slots_used,
603 &npo);
551 604
552 if (sco->meta_slots_used == 1) 605 if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
553 flags = 0; 606 flags = 0;
554 else 607 else
555 flags = XEN_NETRXF_more_data; 608 flags = XEN_NETRXF_more_data;
@@ -586,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
586 639
587 xenvif_add_frag_responses(vif, status, 640 xenvif_add_frag_responses(vif, status,
588 vif->meta + npo.meta_cons + 1, 641 vif->meta + npo.meta_cons + 1,
589 sco->meta_slots_used); 642 XENVIF_RX_CB(skb)->meta_slots_used);
590 643
591 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 644 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
592 645
593 need_to_notify |= !!ret; 646 need_to_notify |= !!ret;
594 647
595 npo.meta_cons += sco->meta_slots_used; 648 npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
596 dev_kfree_skb(skb); 649 dev_kfree_skb(skb);
597 } 650 }
598 651
@@ -642,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
642 struct xen_netif_tx_request *txp, RING_IDX end) 695 struct xen_netif_tx_request *txp, RING_IDX end)
643{ 696{
644 RING_IDX cons = vif->tx.req_cons; 697 RING_IDX cons = vif->tx.req_cons;
698 unsigned long flags;
645 699
646 do { 700 do {
701 spin_lock_irqsave(&vif->response_lock, flags);
647 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 702 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
703 spin_unlock_irqrestore(&vif->response_lock, flags);
648 if (cons == end) 704 if (cons == end)
649 break; 705 break;
650 txp = RING_GET_REQUEST(&vif->tx, cons++); 706 txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -655,7 +711,8 @@ static void xenvif_tx_err(struct xenvif *vif,
655static void xenvif_fatal_tx_err(struct xenvif *vif) 711static void xenvif_fatal_tx_err(struct xenvif *vif)
656{ 712{
657 netdev_err(vif->dev, "fatal error; disabling device\n"); 713 netdev_err(vif->dev, "fatal error; disabling device\n");
658 xenvif_carrier_off(vif); 714 vif->disabled = true;
715 xenvif_kick_thread(vif);
659} 716}
660 717
661static int xenvif_count_requests(struct xenvif *vif, 718static int xenvif_count_requests(struct xenvif *vif,
@@ -756,180 +813,168 @@ static int xenvif_count_requests(struct xenvif *vif,
756 return slots; 813 return slots;
757} 814}
758 815
759static struct page *xenvif_alloc_page(struct xenvif *vif, 816
760 u16 pending_idx) 817struct xenvif_tx_cb {
818 u16 pending_idx;
819};
820
821#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
822
823static inline void xenvif_tx_create_gop(struct xenvif *vif,
824 u16 pending_idx,
825 struct xen_netif_tx_request *txp,
826 struct gnttab_map_grant_ref *gop)
761{ 827{
762 struct page *page; 828 vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
829 gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
830 GNTMAP_host_map | GNTMAP_readonly,
831 txp->gref, vif->domid);
832
833 memcpy(&vif->pending_tx_info[pending_idx].req, txp,
834 sizeof(*txp));
835}
763 836
764 page = alloc_page(GFP_ATOMIC|__GFP_COLD); 837static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
765 if (!page) 838{
839 struct sk_buff *skb =
840 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
841 GFP_ATOMIC | __GFP_NOWARN);
842 if (unlikely(skb == NULL))
766 return NULL; 843 return NULL;
767 vif->mmap_pages[pending_idx] = page;
768 844
769 return page; 845 /* Packets passed to netif_rx() must have some headroom. */
846 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
847
848 /* Initialize it here to avoid later surprises */
849 skb_shinfo(skb)->destructor_arg = NULL;
850
851 return skb;
770} 852}
771 853
772static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif, 854static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
773 struct sk_buff *skb, 855 struct sk_buff *skb,
774 struct xen_netif_tx_request *txp, 856 struct xen_netif_tx_request *txp,
775 struct gnttab_copy *gop) 857 struct gnttab_map_grant_ref *gop)
776{ 858{
777 struct skb_shared_info *shinfo = skb_shinfo(skb); 859 struct skb_shared_info *shinfo = skb_shinfo(skb);
778 skb_frag_t *frags = shinfo->frags; 860 skb_frag_t *frags = shinfo->frags;
779 u16 pending_idx = *((u16 *)skb->data); 861 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
780 u16 head_idx = 0; 862 int start;
781 int slot, start; 863 pending_ring_idx_t index;
782 struct page *page; 864 unsigned int nr_slots, frag_overflow = 0;
783 pending_ring_idx_t index, start_idx = 0;
784 uint16_t dst_offset;
785 unsigned int nr_slots;
786 struct pending_tx_info *first = NULL;
787 865
788 /* At this point shinfo->nr_frags is in fact the number of 866 /* At this point shinfo->nr_frags is in fact the number of
789 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. 867 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
790 */ 868 */
869 if (shinfo->nr_frags > MAX_SKB_FRAGS) {
870 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
871 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
872 shinfo->nr_frags = MAX_SKB_FRAGS;
873 }
791 nr_slots = shinfo->nr_frags; 874 nr_slots = shinfo->nr_frags;
792 875
793 /* Skip first skb fragment if it is on same page as header fragment. */ 876 /* Skip first skb fragment if it is on same page as header fragment. */
794 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 877 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
795 878
796 /* Coalesce tx requests, at this point the packet passed in 879 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
797 * should be <= 64K. Any packets larger than 64K have been 880 shinfo->nr_frags++, txp++, gop++) {
798 * handled in xenvif_count_requests(). 881 index = pending_index(vif->pending_cons++);
799 */ 882 pending_idx = vif->pending_ring[index];
800 for (shinfo->nr_frags = slot = start; slot < nr_slots; 883 xenvif_tx_create_gop(vif, pending_idx, txp, gop);
801 shinfo->nr_frags++) { 884 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
802 struct pending_tx_info *pending_tx_info = 885 }
803 vif->pending_tx_info;
804 886
805 page = alloc_page(GFP_ATOMIC|__GFP_COLD); 887 if (frag_overflow) {
806 if (!page) 888 struct sk_buff *nskb = xenvif_alloc_skb(0);
807 goto err; 889 if (unlikely(nskb == NULL)) {
808 890 if (net_ratelimit())
809 dst_offset = 0; 891 netdev_err(vif->dev,
810 first = NULL; 892 "Can't allocate the frag_list skb.\n");
811 while (dst_offset < PAGE_SIZE && slot < nr_slots) { 893 return NULL;
812 gop->flags = GNTCOPY_source_gref; 894 }
813 895
814 gop->source.u.ref = txp->gref; 896 shinfo = skb_shinfo(nskb);
815 gop->source.domid = vif->domid; 897 frags = shinfo->frags;
816 gop->source.offset = txp->offset;
817
818 gop->dest.domid = DOMID_SELF;
819
820 gop->dest.offset = dst_offset;
821 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
822
823 if (dst_offset + txp->size > PAGE_SIZE) {
824 /* This page can only merge a portion
825 * of tx request. Do not increment any
826 * pointer / counter here. The txp
827 * will be dealt with in future
828 * rounds, eventually hitting the
829 * `else` branch.
830 */
831 gop->len = PAGE_SIZE - dst_offset;
832 txp->offset += gop->len;
833 txp->size -= gop->len;
834 dst_offset += gop->len; /* quit loop */
835 } else {
836 /* This tx request can be merged in the page */
837 gop->len = txp->size;
838 dst_offset += gop->len;
839
840 index = pending_index(vif->pending_cons++);
841
842 pending_idx = vif->pending_ring[index];
843
844 memcpy(&pending_tx_info[pending_idx].req, txp,
845 sizeof(*txp));
846
847 /* Poison these fields, corresponding
848 * fields for head tx req will be set
849 * to correct values after the loop.
850 */
851 vif->mmap_pages[pending_idx] = (void *)(~0UL);
852 pending_tx_info[pending_idx].head =
853 INVALID_PENDING_RING_IDX;
854
855 if (!first) {
856 first = &pending_tx_info[pending_idx];
857 start_idx = index;
858 head_idx = pending_idx;
859 }
860
861 txp++;
862 slot++;
863 }
864 898
865 gop++; 899 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
900 shinfo->nr_frags++, txp++, gop++) {
901 index = pending_index(vif->pending_cons++);
902 pending_idx = vif->pending_ring[index];
903 xenvif_tx_create_gop(vif, pending_idx, txp, gop);
904 frag_set_pending_idx(&frags[shinfo->nr_frags],
905 pending_idx);
866 } 906 }
867 907
868 first->req.offset = 0; 908 skb_shinfo(skb)->frag_list = nskb;
869 first->req.size = dst_offset;
870 first->head = start_idx;
871 vif->mmap_pages[head_idx] = page;
872 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
873 } 909 }
874 910
875 BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
876
877 return gop; 911 return gop;
878err: 912}
879 /* Unwind, freeing all pages and sending error responses. */ 913
880 while (shinfo->nr_frags-- > start) { 914static inline void xenvif_grant_handle_set(struct xenvif *vif,
881 xenvif_idx_release(vif, 915 u16 pending_idx,
882 frag_get_pending_idx(&frags[shinfo->nr_frags]), 916 grant_handle_t handle)
883 XEN_NETIF_RSP_ERROR); 917{
918 if (unlikely(vif->grant_tx_handle[pending_idx] !=
919 NETBACK_INVALID_HANDLE)) {
920 netdev_err(vif->dev,
921 "Trying to overwrite active handle! pending_idx: %x\n",
922 pending_idx);
923 BUG();
884 } 924 }
885 /* The head too, if necessary. */ 925 vif->grant_tx_handle[pending_idx] = handle;
886 if (start) 926}
887 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
888 927
889 return NULL; 928static inline void xenvif_grant_handle_reset(struct xenvif *vif,
929 u16 pending_idx)
930{
931 if (unlikely(vif->grant_tx_handle[pending_idx] ==
932 NETBACK_INVALID_HANDLE)) {
933 netdev_err(vif->dev,
934 "Trying to unmap invalid handle! pending_idx: %x\n",
935 pending_idx);
936 BUG();
937 }
938 vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
890} 939}
891 940
892static int xenvif_tx_check_gop(struct xenvif *vif, 941static int xenvif_tx_check_gop(struct xenvif *vif,
893 struct sk_buff *skb, 942 struct sk_buff *skb,
894 struct gnttab_copy **gopp) 943 struct gnttab_map_grant_ref **gopp)
895{ 944{
896 struct gnttab_copy *gop = *gopp; 945 struct gnttab_map_grant_ref *gop = *gopp;
897 u16 pending_idx = *((u16 *)skb->data); 946 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
898 struct skb_shared_info *shinfo = skb_shinfo(skb); 947 struct skb_shared_info *shinfo = skb_shinfo(skb);
899 struct pending_tx_info *tx_info; 948 struct pending_tx_info *tx_info;
900 int nr_frags = shinfo->nr_frags; 949 int nr_frags = shinfo->nr_frags;
901 int i, err, start; 950 int i, err, start;
902 u16 peek; /* peek into next tx request */ 951 struct sk_buff *first_skb = NULL;
903 952
904 /* Check status of header. */ 953 /* Check status of header. */
905 err = gop->status; 954 err = gop->status;
906 if (unlikely(err)) 955 if (unlikely(err))
907 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 956 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
957 else
958 xenvif_grant_handle_set(vif, pending_idx , gop->handle);
908 959
909 /* Skip first skb fragment if it is on same page as header fragment. */ 960 /* Skip first skb fragment if it is on same page as header fragment. */
910 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 961 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
911 962
963check_frags:
912 for (i = start; i < nr_frags; i++) { 964 for (i = start; i < nr_frags; i++) {
913 int j, newerr; 965 int j, newerr;
914 pending_ring_idx_t head;
915 966
916 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 967 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
917 tx_info = &vif->pending_tx_info[pending_idx]; 968 tx_info = &vif->pending_tx_info[pending_idx];
918 head = tx_info->head;
919 969
920 /* Check error status: if okay then remember grant handle. */ 970 /* Check error status: if okay then remember grant handle. */
921 do { 971 newerr = (++gop)->status;
922 newerr = (++gop)->status;
923 if (newerr)
924 break;
925 peek = vif->pending_ring[pending_index(++head)];
926 } while (!pending_tx_is_head(vif, peek));
927 972
928 if (likely(!newerr)) { 973 if (likely(!newerr)) {
974 xenvif_grant_handle_set(vif, pending_idx , gop->handle);
929 /* Had a previous error? Invalidate this fragment. */ 975 /* Had a previous error? Invalidate this fragment. */
930 if (unlikely(err)) 976 if (unlikely(err))
931 xenvif_idx_release(vif, pending_idx, 977 xenvif_idx_unmap(vif, pending_idx);
932 XEN_NETIF_RSP_OKAY);
933 continue; 978 continue;
934 } 979 }
935 980
@@ -939,20 +984,45 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
939 /* Not the first error? Preceding frags already invalidated. */ 984 /* Not the first error? Preceding frags already invalidated. */
940 if (err) 985 if (err)
941 continue; 986 continue;
942
943 /* First error: invalidate header and preceding fragments. */ 987 /* First error: invalidate header and preceding fragments. */
944 pending_idx = *((u16 *)skb->data); 988 if (!first_skb)
945 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); 989 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
990 else
991 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
992 xenvif_idx_unmap(vif, pending_idx);
946 for (j = start; j < i; j++) { 993 for (j = start; j < i; j++) {
947 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 994 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
948 xenvif_idx_release(vif, pending_idx, 995 xenvif_idx_unmap(vif, pending_idx);
949 XEN_NETIF_RSP_OKAY);
950 } 996 }
951 997
952 /* Remember the error: invalidate all subsequent fragments. */ 998 /* Remember the error: invalidate all subsequent fragments. */
953 err = newerr; 999 err = newerr;
954 } 1000 }
955 1001
1002 if (skb_has_frag_list(skb)) {
1003 first_skb = skb;
1004 skb = shinfo->frag_list;
1005 shinfo = skb_shinfo(skb);
1006 nr_frags = shinfo->nr_frags;
1007 start = 0;
1008
1009 goto check_frags;
1010 }
1011
1012 /* There was a mapping error in the frag_list skb. We have to unmap
1013 * the first skb's frags
1014 */
1015 if (first_skb && err) {
1016 int j;
1017 shinfo = skb_shinfo(first_skb);
1018 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1019 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1020 for (j = start; j < shinfo->nr_frags; j++) {
1021 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1022 xenvif_idx_unmap(vif, pending_idx);
1023 }
1024 }
1025
956 *gopp = gop + 1; 1026 *gopp = gop + 1;
957 return err; 1027 return err;
958} 1028}
@@ -962,6 +1032,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
962 struct skb_shared_info *shinfo = skb_shinfo(skb); 1032 struct skb_shared_info *shinfo = skb_shinfo(skb);
963 int nr_frags = shinfo->nr_frags; 1033 int nr_frags = shinfo->nr_frags;
964 int i; 1034 int i;
1035 u16 prev_pending_idx = INVALID_PENDING_IDX;
1036
1037 if (skb_shinfo(skb)->destructor_arg)
1038 prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
965 1039
966 for (i = 0; i < nr_frags; i++) { 1040 for (i = 0; i < nr_frags; i++) {
967 skb_frag_t *frag = shinfo->frags + i; 1041 skb_frag_t *frag = shinfo->frags + i;
@@ -971,6 +1045,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
971 1045
972 pending_idx = frag_get_pending_idx(frag); 1046 pending_idx = frag_get_pending_idx(frag);
973 1047
1048 /* If this is not the first frag, chain it to the previous*/
1049 if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
1050 skb_shinfo(skb)->destructor_arg =
1051 &callback_param(vif, pending_idx);
1052 else if (likely(pending_idx != prev_pending_idx))
1053 callback_param(vif, prev_pending_idx).ctx =
1054 &callback_param(vif, pending_idx);
1055
1056 callback_param(vif, pending_idx).ctx = NULL;
1057 prev_pending_idx = pending_idx;
1058
974 txp = &vif->pending_tx_info[pending_idx].req; 1059 txp = &vif->pending_tx_info[pending_idx].req;
975 page = virt_to_page(idx_to_kaddr(vif, pending_idx)); 1060 page = virt_to_page(idx_to_kaddr(vif, pending_idx));
976 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); 1061 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -978,10 +1063,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
978 skb->data_len += txp->size; 1063 skb->data_len += txp->size;
979 skb->truesize += txp->size; 1064 skb->truesize += txp->size;
980 1065
981 /* Take an extra reference to offset xenvif_idx_release */ 1066 /* Take an extra reference to offset network stack's put_page */
982 get_page(vif->mmap_pages[pending_idx]); 1067 get_page(vif->mmap_pages[pending_idx]);
983 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
984 } 1068 }
1069 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1070 * overlaps with "index", and "mapping" is not set. I think mapping
1071 * should be set. If delivered to local stack, it would drop this
1072 * skb in sk_filter unless the socket has the right to use it.
1073 */
1074 skb->pfmemalloc = false;
985} 1075}
986 1076
987static int xenvif_get_extras(struct xenvif *vif, 1077static int xenvif_get_extras(struct xenvif *vif,
@@ -1101,16 +1191,13 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1101 1191
1102static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) 1192static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1103{ 1193{
1104 struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; 1194 struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
1105 struct sk_buff *skb; 1195 struct sk_buff *skb;
1106 int ret; 1196 int ret;
1107 1197
1108 while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX 1198 while (skb_queue_len(&vif->tx_queue) < budget) {
1109 < MAX_PENDING_REQS) &&
1110 (skb_queue_len(&vif->tx_queue) < budget)) {
1111 struct xen_netif_tx_request txreq; 1199 struct xen_netif_tx_request txreq;
1112 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1200 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1113 struct page *page;
1114 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; 1201 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1115 u16 pending_idx; 1202 u16 pending_idx;
1116 RING_IDX idx; 1203 RING_IDX idx;
@@ -1126,7 +1213,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1126 vif->tx.sring->req_prod, vif->tx.req_cons, 1213 vif->tx.sring->req_prod, vif->tx.req_cons,
1127 XEN_NETIF_TX_RING_SIZE); 1214 XEN_NETIF_TX_RING_SIZE);
1128 xenvif_fatal_tx_err(vif); 1215 xenvif_fatal_tx_err(vif);
1129 continue; 1216 break;
1130 } 1217 }
1131 1218
1132 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); 1219 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
@@ -1186,8 +1273,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1186 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 1273 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1187 PKT_PROT_LEN : txreq.size; 1274 PKT_PROT_LEN : txreq.size;
1188 1275
1189 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, 1276 skb = xenvif_alloc_skb(data_len);
1190 GFP_ATOMIC | __GFP_NOWARN);
1191 if (unlikely(skb == NULL)) { 1277 if (unlikely(skb == NULL)) {
1192 netdev_dbg(vif->dev, 1278 netdev_dbg(vif->dev,
1193 "Can't allocate a skb in start_xmit.\n"); 1279 "Can't allocate a skb in start_xmit.\n");
@@ -1195,9 +1281,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1195 break; 1281 break;
1196 } 1282 }
1197 1283
1198 /* Packets passed to netif_rx() must have some headroom. */
1199 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1200
1201 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 1284 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1202 struct xen_netif_extra_info *gso; 1285 struct xen_netif_extra_info *gso;
1203 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1286 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1209,31 +1292,11 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1209 } 1292 }
1210 } 1293 }
1211 1294
1212 /* XXX could copy straight to head */ 1295 xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
1213 page = xenvif_alloc_page(vif, pending_idx);
1214 if (!page) {
1215 kfree_skb(skb);
1216 xenvif_tx_err(vif, &txreq, idx);
1217 break;
1218 }
1219
1220 gop->source.u.ref = txreq.gref;
1221 gop->source.domid = vif->domid;
1222 gop->source.offset = txreq.offset;
1223
1224 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1225 gop->dest.domid = DOMID_SELF;
1226 gop->dest.offset = txreq.offset;
1227
1228 gop->len = txreq.size;
1229 gop->flags = GNTCOPY_source_gref;
1230 1296
1231 gop++; 1297 gop++;
1232 1298
1233 memcpy(&vif->pending_tx_info[pending_idx].req, 1299 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1234 &txreq, sizeof(txreq));
1235 vif->pending_tx_info[pending_idx].head = index;
1236 *((u16 *)skb->data) = pending_idx;
1237 1300
1238 __skb_put(skb, data_len); 1301 __skb_put(skb, data_len);
1239 1302
@@ -1261,17 +1324,82 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1261 1324
1262 vif->tx.req_cons = idx; 1325 vif->tx.req_cons = idx;
1263 1326
1264 if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops)) 1327 if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
1265 break; 1328 break;
1266 } 1329 }
1267 1330
1268 return gop - vif->tx_copy_ops; 1331 return gop - vif->tx_map_ops;
1269} 1332}
1270 1333
1334/* Consolidate skb with a frag_list into a brand new one with local pages on
1335 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1336 */
1337static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1338{
1339 unsigned int offset = skb_headlen(skb);
1340 skb_frag_t frags[MAX_SKB_FRAGS];
1341 int i;
1342 struct ubuf_info *uarg;
1343 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1344
1345 vif->tx_zerocopy_sent += 2;
1346 vif->tx_frag_overflow++;
1347
1348 xenvif_fill_frags(vif, nskb);
1349 /* Subtract frags size, we will correct it later */
1350 skb->truesize -= skb->data_len;
1351 skb->len += nskb->len;
1352 skb->data_len += nskb->len;
1353
1354 /* create a brand new frags array and coalesce there */
1355 for (i = 0; offset < skb->len; i++) {
1356 struct page *page;
1357 unsigned int len;
1358
1359 BUG_ON(i >= MAX_SKB_FRAGS);
1360 page = alloc_page(GFP_ATOMIC|__GFP_COLD);
1361 if (!page) {
1362 int j;
1363 skb->truesize += skb->data_len;
1364 for (j = 0; j < i; j++)
1365 put_page(frags[j].page.p);
1366 return -ENOMEM;
1367 }
1368
1369 if (offset + PAGE_SIZE < skb->len)
1370 len = PAGE_SIZE;
1371 else
1372 len = skb->len - offset;
1373 if (skb_copy_bits(skb, offset, page_address(page), len))
1374 BUG();
1375
1376 offset += len;
1377 frags[i].page.p = page;
1378 frags[i].page_offset = 0;
1379 skb_frag_size_set(&frags[i], len);
1380 }
1381 /* swap out with old one */
1382 memcpy(skb_shinfo(skb)->frags,
1383 frags,
1384 i * sizeof(skb_frag_t));
1385 skb_shinfo(skb)->nr_frags = i;
1386 skb->truesize += i * PAGE_SIZE;
1387
1388 /* remove traces of mapped pages and frag_list */
1389 skb_frag_list_init(skb);
1390 uarg = skb_shinfo(skb)->destructor_arg;
1391 uarg->callback(uarg, true);
1392 skb_shinfo(skb)->destructor_arg = NULL;
1393
1394 skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1395 kfree_skb(nskb);
1396
1397 return 0;
1398}
1271 1399
1272static int xenvif_tx_submit(struct xenvif *vif) 1400static int xenvif_tx_submit(struct xenvif *vif)
1273{ 1401{
1274 struct gnttab_copy *gop = vif->tx_copy_ops; 1402 struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
1275 struct sk_buff *skb; 1403 struct sk_buff *skb;
1276 int work_done = 0; 1404 int work_done = 0;
1277 1405
@@ -1280,7 +1408,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
1280 u16 pending_idx; 1408 u16 pending_idx;
1281 unsigned data_len; 1409 unsigned data_len;
1282 1410
1283 pending_idx = *((u16 *)skb->data); 1411 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1284 txp = &vif->pending_tx_info[pending_idx].req; 1412 txp = &vif->pending_tx_info[pending_idx].req;
1285 1413
1286 /* Check the remap error code. */ 1414 /* Check the remap error code. */
@@ -1295,14 +1423,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
1295 memcpy(skb->data, 1423 memcpy(skb->data,
1296 (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), 1424 (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
1297 data_len); 1425 data_len);
1426 callback_param(vif, pending_idx).ctx = NULL;
1298 if (data_len < txp->size) { 1427 if (data_len < txp->size) {
1299 /* Append the packet payload as a fragment. */ 1428 /* Append the packet payload as a fragment. */
1300 txp->offset += data_len; 1429 txp->offset += data_len;
1301 txp->size -= data_len; 1430 txp->size -= data_len;
1431 skb_shinfo(skb)->destructor_arg =
1432 &callback_param(vif, pending_idx);
1302 } else { 1433 } else {
1303 /* Schedule a response immediately. */ 1434 /* Schedule a response immediately. */
1304 xenvif_idx_release(vif, pending_idx, 1435 xenvif_idx_unmap(vif, pending_idx);
1305 XEN_NETIF_RSP_OKAY);
1306 } 1436 }
1307 1437
1308 if (txp->flags & XEN_NETTXF_csum_blank) 1438 if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1312,6 +1442,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
1312 1442
1313 xenvif_fill_frags(vif, skb); 1443 xenvif_fill_frags(vif, skb);
1314 1444
1445 if (unlikely(skb_has_frag_list(skb))) {
1446 if (xenvif_handle_frag_list(vif, skb)) {
1447 if (net_ratelimit())
1448 netdev_err(vif->dev,
1449 "Not enough memory to consolidate frag_list!\n");
1450 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1451 kfree_skb(skb);
1452 continue;
1453 }
1454 }
1455
1315 if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) { 1456 if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
1316 int target = min_t(int, skb->len, PKT_PROT_LEN); 1457 int target = min_t(int, skb->len, PKT_PROT_LEN);
1317 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1458 __pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1324,6 +1465,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
1324 if (checksum_setup(vif, skb)) { 1465 if (checksum_setup(vif, skb)) {
1325 netdev_dbg(vif->dev, 1466 netdev_dbg(vif->dev,
1326 "Can't setup checksum in net_tx_action\n"); 1467 "Can't setup checksum in net_tx_action\n");
1468 /* We have to set this flag to trigger the callback */
1469 if (skb_shinfo(skb)->destructor_arg)
1470 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1327 kfree_skb(skb); 1471 kfree_skb(skb);
1328 continue; 1472 continue;
1329 } 1473 }
@@ -1349,17 +1493,126 @@ static int xenvif_tx_submit(struct xenvif *vif)
1349 1493
1350 work_done++; 1494 work_done++;
1351 1495
1496 /* Set this flag right before netif_receive_skb, otherwise
1497 * someone might think this packet already left netback, and
1498 * do a skb_copy_ubufs while we are still in control of the
1499 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1500 */
1501 if (skb_shinfo(skb)->destructor_arg) {
1502 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1503 vif->tx_zerocopy_sent++;
1504 }
1505
1352 netif_receive_skb(skb); 1506 netif_receive_skb(skb);
1353 } 1507 }
1354 1508
1355 return work_done; 1509 return work_done;
1356} 1510}
1357 1511
1512void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1513{
1514 unsigned long flags;
1515 pending_ring_idx_t index;
1516 struct xenvif *vif = ubuf_to_vif(ubuf);
1517
1518 /* This is the only place where we grab this lock, to protect callbacks
1519 * from each other.
1520 */
1521 spin_lock_irqsave(&vif->callback_lock, flags);
1522 do {
1523 u16 pending_idx = ubuf->desc;
1524 ubuf = (struct ubuf_info *) ubuf->ctx;
1525 BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
1526 MAX_PENDING_REQS);
1527 index = pending_index(vif->dealloc_prod);
1528 vif->dealloc_ring[index] = pending_idx;
1529 /* Sync with xenvif_tx_dealloc_action:
1530 * insert idx then incr producer.
1531 */
1532 smp_wmb();
1533 vif->dealloc_prod++;
1534 } while (ubuf);
1535 wake_up(&vif->dealloc_wq);
1536 spin_unlock_irqrestore(&vif->callback_lock, flags);
1537
1538 if (likely(zerocopy_success))
1539 vif->tx_zerocopy_success++;
1540 else
1541 vif->tx_zerocopy_fail++;
1542}
1543
1544static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1545{
1546 struct gnttab_unmap_grant_ref *gop;
1547 pending_ring_idx_t dc, dp;
1548 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1549 unsigned int i = 0;
1550
1551 dc = vif->dealloc_cons;
1552 gop = vif->tx_unmap_ops;
1553
1554 /* Free up any grants we have finished using */
1555 do {
1556 dp = vif->dealloc_prod;
1557
1558 /* Ensure we see all indices enqueued by all
1559 * xenvif_zerocopy_callback().
1560 */
1561 smp_rmb();
1562
1563 while (dc != dp) {
1564 BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
1565 pending_idx =
1566 vif->dealloc_ring[pending_index(dc++)];
1567
1568 pending_idx_release[gop-vif->tx_unmap_ops] =
1569 pending_idx;
1570 vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
1571 vif->mmap_pages[pending_idx];
1572 gnttab_set_unmap_op(gop,
1573 idx_to_kaddr(vif, pending_idx),
1574 GNTMAP_host_map,
1575 vif->grant_tx_handle[pending_idx]);
1576 xenvif_grant_handle_reset(vif, pending_idx);
1577 ++gop;
1578 }
1579
1580 } while (dp != vif->dealloc_prod);
1581
1582 vif->dealloc_cons = dc;
1583
1584 if (gop - vif->tx_unmap_ops > 0) {
1585 int ret;
1586 ret = gnttab_unmap_refs(vif->tx_unmap_ops,
1587 NULL,
1588 vif->pages_to_unmap,
1589 gop - vif->tx_unmap_ops);
1590 if (ret) {
1591 netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1592 gop - vif->tx_unmap_ops, ret);
1593 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
1594 if (gop[i].status != GNTST_okay)
1595 netdev_err(vif->dev,
1596 " host_addr: %llx handle: %x status: %d\n",
1597 gop[i].host_addr,
1598 gop[i].handle,
1599 gop[i].status);
1600 }
1601 BUG();
1602 }
1603 }
1604
1605 for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
1606 xenvif_idx_release(vif, pending_idx_release[i],
1607 XEN_NETIF_RSP_OKAY);
1608}
1609
1610
1358/* Called after netfront has transmitted */ 1611/* Called after netfront has transmitted */
1359int xenvif_tx_action(struct xenvif *vif, int budget) 1612int xenvif_tx_action(struct xenvif *vif, int budget)
1360{ 1613{
1361 unsigned nr_gops; 1614 unsigned nr_gops;
1362 int work_done; 1615 int work_done, ret;
1363 1616
1364 if (unlikely(!tx_work_todo(vif))) 1617 if (unlikely(!tx_work_todo(vif)))
1365 return 0; 1618 return 0;
@@ -1369,7 +1622,11 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
1369 if (nr_gops == 0) 1622 if (nr_gops == 0)
1370 return 0; 1623 return 0;
1371 1624
1372 gnttab_batch_copy(vif->tx_copy_ops, nr_gops); 1625 ret = gnttab_map_refs(vif->tx_map_ops,
1626 NULL,
1627 vif->pages_to_map,
1628 nr_gops);
1629 BUG_ON(ret);
1373 1630
1374 work_done = xenvif_tx_submit(vif); 1631 work_done = xenvif_tx_submit(vif);
1375 1632
@@ -1380,45 +1637,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
1380 u8 status) 1637 u8 status)
1381{ 1638{
1382 struct pending_tx_info *pending_tx_info; 1639 struct pending_tx_info *pending_tx_info;
1383 pending_ring_idx_t head; 1640 pending_ring_idx_t index;
1384 u16 peek; /* peek into next tx request */ 1641 unsigned long flags;
1385
1386 BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
1387
1388 /* Already complete? */
1389 if (vif->mmap_pages[pending_idx] == NULL)
1390 return;
1391 1642
1392 pending_tx_info = &vif->pending_tx_info[pending_idx]; 1643 pending_tx_info = &vif->pending_tx_info[pending_idx];
1393 1644 spin_lock_irqsave(&vif->response_lock, flags);
1394 head = pending_tx_info->head; 1645 make_tx_response(vif, &pending_tx_info->req, status);
1395 1646 index = pending_index(vif->pending_prod);
1396 BUG_ON(!pending_tx_is_head(vif, head)); 1647 vif->pending_ring[index] = pending_idx;
1397 BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx); 1648 /* TX shouldn't use the index before we give it back here */
1398 1649 mb();
1399 do { 1650 vif->pending_prod++;
1400 pending_ring_idx_t index; 1651 spin_unlock_irqrestore(&vif->response_lock, flags);
1401 pending_ring_idx_t idx = pending_index(head);
1402 u16 info_idx = vif->pending_ring[idx];
1403
1404 pending_tx_info = &vif->pending_tx_info[info_idx];
1405 make_tx_response(vif, &pending_tx_info->req, status);
1406
1407 /* Setting any number other than
1408 * INVALID_PENDING_RING_IDX indicates this slot is
1409 * starting a new packet / ending a previous packet.
1410 */
1411 pending_tx_info->head = 0;
1412
1413 index = pending_index(vif->pending_prod++);
1414 vif->pending_ring[index] = vif->pending_ring[info_idx];
1415
1416 peek = vif->pending_ring[pending_index(++head)];
1417
1418 } while (!pending_tx_is_head(vif, peek));
1419
1420 put_page(vif->mmap_pages[pending_idx]);
1421 vif->mmap_pages[pending_idx] = NULL;
1422} 1652}
1423 1653
1424 1654
@@ -1466,23 +1696,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1466 return resp; 1696 return resp;
1467} 1697}
1468 1698
1699void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
1700{
1701 int ret;
1702 struct gnttab_unmap_grant_ref tx_unmap_op;
1703
1704 gnttab_set_unmap_op(&tx_unmap_op,
1705 idx_to_kaddr(vif, pending_idx),
1706 GNTMAP_host_map,
1707 vif->grant_tx_handle[pending_idx]);
1708 xenvif_grant_handle_reset(vif, pending_idx);
1709
1710 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1711 &vif->mmap_pages[pending_idx], 1);
1712 if (ret) {
1713 netdev_err(vif->dev,
1714 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1715 ret,
1716 pending_idx,
1717 tx_unmap_op.host_addr,
1718 tx_unmap_op.handle,
1719 tx_unmap_op.status);
1720 BUG();
1721 }
1722
1723 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
1724}
1725
1469static inline int rx_work_todo(struct xenvif *vif) 1726static inline int rx_work_todo(struct xenvif *vif)
1470{ 1727{
1471 return !skb_queue_empty(&vif->rx_queue) && 1728 return (!skb_queue_empty(&vif->rx_queue) &&
1472 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots); 1729 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
1730 vif->rx_queue_purge;
1473} 1731}
1474 1732
1475static inline int tx_work_todo(struct xenvif *vif) 1733static inline int tx_work_todo(struct xenvif *vif)
1476{ 1734{
1477 1735
1478 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && 1736 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
1479 (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1480 < MAX_PENDING_REQS))
1481 return 1; 1737 return 1;
1482 1738
1483 return 0; 1739 return 0;
1484} 1740}
1485 1741
1742static inline bool tx_dealloc_work_todo(struct xenvif *vif)
1743{
1744 return vif->dealloc_cons != vif->dealloc_prod;
1745}
1746
1486void xenvif_unmap_frontend_rings(struct xenvif *vif) 1747void xenvif_unmap_frontend_rings(struct xenvif *vif)
1487{ 1748{
1488 if (vif->tx.sring) 1749 if (vif->tx.sring)
@@ -1540,7 +1801,7 @@ static void xenvif_start_queue(struct xenvif *vif)
1540 netif_wake_queue(vif->dev); 1801 netif_wake_queue(vif->dev);
1541} 1802}
1542 1803
1543int xenvif_kthread(void *data) 1804int xenvif_kthread_guest_rx(void *data)
1544{ 1805{
1545 struct xenvif *vif = data; 1806 struct xenvif *vif = data;
1546 struct sk_buff *skb; 1807 struct sk_buff *skb;
@@ -1548,16 +1809,34 @@ int xenvif_kthread(void *data)
1548 while (!kthread_should_stop()) { 1809 while (!kthread_should_stop()) {
1549 wait_event_interruptible(vif->wq, 1810 wait_event_interruptible(vif->wq,
1550 rx_work_todo(vif) || 1811 rx_work_todo(vif) ||
1812 vif->disabled ||
1551 kthread_should_stop()); 1813 kthread_should_stop());
1814
1815 /* This frontend is found to be rogue, disable it in
1816 * kthread context. Currently this is only set when
1817 * netback finds out frontend sends malformed packet,
1818 * but we cannot disable the interface in softirq
1819 * context so we defer it here.
1820 */
1821 if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
1822 xenvif_carrier_off(vif);
1823
1552 if (kthread_should_stop()) 1824 if (kthread_should_stop())
1553 break; 1825 break;
1554 1826
1827 if (vif->rx_queue_purge) {
1828 skb_queue_purge(&vif->rx_queue);
1829 vif->rx_queue_purge = false;
1830 }
1831
1555 if (!skb_queue_empty(&vif->rx_queue)) 1832 if (!skb_queue_empty(&vif->rx_queue))
1556 xenvif_rx_action(vif); 1833 xenvif_rx_action(vif);
1557 1834
1558 if (skb_queue_empty(&vif->rx_queue) && 1835 if (skb_queue_empty(&vif->rx_queue) &&
1559 netif_queue_stopped(vif->dev)) 1836 netif_queue_stopped(vif->dev)) {
1837 del_timer_sync(&vif->wake_queue);
1560 xenvif_start_queue(vif); 1838 xenvif_start_queue(vif);
1839 }
1561 1840
1562 cond_resched(); 1841 cond_resched();
1563 } 1842 }
@@ -1569,6 +1848,28 @@ int xenvif_kthread(void *data)
1569 return 0; 1848 return 0;
1570} 1849}
1571 1850
1851int xenvif_dealloc_kthread(void *data)
1852{
1853 struct xenvif *vif = data;
1854
1855 while (!kthread_should_stop()) {
1856 wait_event_interruptible(vif->dealloc_wq,
1857 tx_dealloc_work_todo(vif) ||
1858 kthread_should_stop());
1859 if (kthread_should_stop())
1860 break;
1861
1862 xenvif_tx_dealloc_action(vif);
1863 cond_resched();
1864 }
1865
1866 /* Unmap anything remaining*/
1867 if (tx_dealloc_work_todo(vif))
1868 xenvif_tx_dealloc_action(vif);
1869
1870 return 0;
1871}
1872
1572static int __init netback_init(void) 1873static int __init netback_init(void)
1573{ 1874{
1574 int rc = 0; 1875 int rc = 0;
@@ -1586,6 +1887,8 @@ static int __init netback_init(void)
1586 if (rc) 1887 if (rc)
1587 goto failed_init; 1888 goto failed_init;
1588 1889
1890 rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
1891
1589 return 0; 1892 return 0;
1590 1893
1591failed_init: 1894failed_init:
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e30d80033cbc..057b05700f8b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -658,7 +658,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
658 658
659 drop: 659 drop:
660 dev->stats.tx_dropped++; 660 dev->stats.tx_dropped++;
661 dev_kfree_skb(skb); 661 dev_kfree_skb_any(skb);
662 return NETDEV_TX_OK; 662 return NETDEV_TX_OK;
663} 663}
664 664
@@ -1060,13 +1060,13 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1060 unsigned int start; 1060 unsigned int start;
1061 1061
1062 do { 1062 do {
1063 start = u64_stats_fetch_begin_bh(&stats->syncp); 1063 start = u64_stats_fetch_begin_irq(&stats->syncp);
1064 1064
1065 rx_packets = stats->rx_packets; 1065 rx_packets = stats->rx_packets;
1066 tx_packets = stats->tx_packets; 1066 tx_packets = stats->tx_packets;
1067 rx_bytes = stats->rx_bytes; 1067 rx_bytes = stats->rx_bytes;
1068 tx_bytes = stats->tx_bytes; 1068 tx_bytes = stats->tx_bytes;
1069 } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); 1069 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1070 1070
1071 tot->rx_packets += rx_packets; 1071 tot->rx_packets += rx_packets;
1072 tot->tx_packets += tx_packets; 1072 tot->tx_packets += tx_packets;
@@ -1282,16 +1282,10 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1282 np->rx_refill_timer.function = rx_refill_timeout; 1282 np->rx_refill_timer.function = rx_refill_timeout;
1283 1283
1284 err = -ENOMEM; 1284 err = -ENOMEM;
1285 np->stats = alloc_percpu(struct netfront_stats); 1285 np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1286 if (np->stats == NULL) 1286 if (np->stats == NULL)
1287 goto exit; 1287 goto exit;
1288 1288
1289 for_each_possible_cpu(i) {
1290 struct netfront_stats *xen_nf_stats;
1291 xen_nf_stats = per_cpu_ptr(np->stats, i);
1292 u64_stats_init(&xen_nf_stats->syncp);
1293 }
1294
1295 /* Initialise tx_skbs as a free chain containing every entry. */ 1289 /* Initialise tx_skbs as a free chain containing every entry. */
1296 np->tx_skb_freelist = 0; 1290 np->tx_skb_freelist = 0;
1297 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1291 for (i = 0; i < NET_TX_RING_SIZE; i++) {